/*
 * match_nl_get_ports() - request a range of ports over the match netlink
 * interface and return the parsed port list.
 *
 * @nsd:     netlink socket used for send/recv
 * @pid:     netlink destination pid for the request
 * @ifindex: interface identifier put as NET_MAT_IDENTIFIER
 * @family:  generic netlink family id
 * @min:     lowest port index to query; 0 means "no lower bound attribute"
 * @max:     highest port index to query; 0 means "no upper bound attribute"
 *
 * Returns a port list allocated by match_get_ports() on success, NULL on
 * any failure. Ownership of the returned list passes to the caller
 * (presumably freed with the matching project helper — confirm).
 */
struct net_mat_port *match_nl_get_ports(struct nl_sock *nsd, uint32_t pid,
					unsigned int ifindex, int family,
					uint32_t min, uint32_t max)
{
	uint8_t cmd = NET_MAT_PORT_CMD_GET_PORTS;
	struct nlattr *tb[NET_MAT_MAX+1];
	struct net_mat_port *port = NULL;
	struct match_msg *msg;
	struct nlmsghdr *nlh;
	struct nlattr *ports;
	sigset_t bs;
	int err = 0;

	msg = match_nl_alloc_msg(cmd, pid, NLM_F_REQUEST|NLM_F_ACK, 0, family);
	if (!msg) {
		MAT_LOG(ERR, "Error: Allocation failure\n");
		return NULL;
	}

	/* Identify which device the query is about. */
	if (nla_put_u32(msg->nlbuf, NET_MAT_IDENTIFIER_TYPE,
			NET_MAT_IDENTIFIER_IFINDEX) ||
	    nla_put_u32(msg->nlbuf, NET_MAT_IDENTIFIER, ifindex)) {
		MAT_LOG(ERR, "Error: Identifier put failed\n");
		goto out;
	}

	err = match_put_rule_error(msg->nlbuf, NET_MAT_RULES_ERROR_CONT_LOG);
	if (err)
		goto out;

	/* Optional min/max bounds live inside a NET_MAT_PORTS nest. */
	ports = nla_nest_start(msg->nlbuf, NET_MAT_PORTS);
	if (!ports) {
		MAT_LOG(ERR, "Error: get_port attributes failed\n");
		goto out;
	}

	if (min) {
		err = nla_put_u32(msg->nlbuf, NET_MAT_PORT_MIN_INDEX, min);
		if (err)
			goto out;
	}

	if (max) {
		err = nla_put_u32(msg->nlbuf, NET_MAT_PORT_MAX_INDEX, max);
		if (err)
			goto out;
	}

	nla_nest_end(msg->nlbuf, ports);
	nl_send_auto(nsd, msg->nlbuf);
	match_nl_free_msg(msg);

	/* message sent handle recv */
	/* Unblock SIGINT only for the duration of the blocking receive so
	 * the user can interrupt a hung reply; re-block afterwards. */
	sigemptyset(&bs);
	sigaddset(&bs, SIGINT);
	sigprocmask(SIG_UNBLOCK, &bs, NULL);
	msg = match_nl_recv_msg(nsd, &err);
	sigprocmask(SIG_BLOCK, &bs, NULL);

	if (msg) {
		nlh = msg->msg;
		err = genlmsg_parse(nlh, 0, tb, NET_MAT_MAX,
				    match_get_tables_policy);
		if (err < 0) {
			MAT_LOG(ERR, "Warning: unable to parse get rules msg\n");
			goto out;
		}

		if (match_nl_table_cmd_to_type(stdout, true, NET_MAT_PORTS, tb))
			goto out;

		if (tb[NET_MAT_PORTS]) {
			err = match_get_ports(stdout, verbose,
					      tb[NET_MAT_PORTS], &port);
			if (err)
				goto out;
		}
	}
	/* NOTE(review): if the receive fails (msg == NULL) this falls
	 * through and returns the still-NULL port — intentional "no
	 * result" path. match_nl_free_msg() is presumed NULL-safe here;
	 * confirm against its implementation. */
	match_nl_free_msg(msg);
	return port;
out:
	match_nl_free_msg(msg);
	return NULL;
}
/*
 * Serialize one flat list of ematches into @msg. Each ematch becomes a
 * nested attribute keyed by e_index + 1, containing a tcf_ematch_hdr
 * followed by its payload. Container children are emitted afterwards, in
 * a second pass, so the whole sequence stays ordered by index.
 */
static int fill_ematch_sequence(struct nl_msg *msg, struct nl_list_head *list)
{
	struct rtnl_ematch *e;

	nl_list_for_each_entry(e, list, e_list) {
		struct tcf_ematch_hdr match = {
			.matchid = e->e_id,
			.kind = e->e_kind,
			.flags = e->e_flags,
		};
		struct nlattr *attr;
		int err = 0;

		/* Attribute type is the 1-based ematch index. */
		if (!(attr = nla_nest_start(msg, e->e_index + 1)))
			return -NLE_NOMEM;

		if (nlmsg_append(msg, &match, sizeof(match), 0) < 0)
			return -NLE_NOMEM;

		/* Payload: module-specific fill hook, else 4 bytes of
		 * inline data for TCF_EM_SIMPLE, else the raw data blob. */
		if (e->e_ops->eo_fill)
			err = e->e_ops->eo_fill(e, msg);
		else if (e->e_flags & TCF_EM_SIMPLE)
			err = nlmsg_append(msg, e->e_data, 4, 0);
		else if (e->e_datalen > 0)
			err = nlmsg_append(msg, e->e_data, e->e_datalen, 0);

		NL_DBG(3, "msg %p: added ematch [%d] id=%d kind=%d flags=%d\n",
		       msg, e->e_index, match.matchid, match.kind, match.flags);

		if (err < 0)
			return -NLE_NOMEM;

		nla_nest_end(msg, attr);
	}

	/* Second pass: recurse into containers after all siblings are out. */
	nl_list_for_each_entry(e, list, e_list) {
		if (e->e_kind == TCF_EM_CONTAINER &&
		    fill_ematch_sequence(msg, &e->e_childs) < 0)
			return -NLE_NOMEM;
	}

	return 0;
}

/*
 * rtnl_ematch_fill_attr() - write an ematch tree into @msg under @attrid.
 *
 * Emits TCA_EMATCH_TREE_HDR (progid + total match count) followed by the
 * TCA_EMATCH_TREE_LIST produced by fill_ematch_sequence().
 *
 * Returns 0 on success or -NLE_NOMEM if the message ran out of space.
 */
int rtnl_ematch_fill_attr(struct nl_msg *msg, int attrid,
			  struct rtnl_ematch_tree *tree)
{
	struct tcf_ematch_tree_hdr thdr = {
		.progid = tree->et_progid,
	};
	struct nlattr *list, *topattr;
	int err, index = 0;

	/* Assign index number to each ematch to allow for references
	 * to be made while constructing the sequence of matches. */
	err = update_container_index(&tree->et_list, &index);
	if (err < 0)
		return err;

	if (!(topattr = nla_nest_start(msg, attrid)))
		goto nla_put_failure;

	thdr.nmatches = index;
	NLA_PUT(msg, TCA_EMATCH_TREE_HDR, sizeof(thdr), &thdr);

	if (!(list = nla_nest_start(msg, TCA_EMATCH_TREE_LIST)))
		goto nla_put_failure;

	if (fill_ematch_sequence(msg, &tree->et_list) < 0)
		goto nla_put_failure;

	nla_nest_end(msg, list);
	nla_nest_end(msg, topattr);

	return 0;

nla_put_failure:
	return -NLE_NOMEM;
}

/** @} */

extern int ematch_parse(void *, char **, struct nl_list_head *);

/*
 * rtnl_ematch_parse_expr() - parse a textual ematch expression into a tree.
 *
 * @expr:   expression string (e.g. produced by tc-style syntax)
 * @errp:   receives a parser error string on failure (from ematch_parse)
 * @result: receives the newly allocated tree on success (caller frees)
 *
 * Returns 0 on success, -NLE_FAILURE on allocation/lexer setup failure,
 * or -NLE_PARSE_ERR if the grammar rejects @expr.
 */
int rtnl_ematch_parse_expr(const char *expr, char **errp,
			   struct rtnl_ematch_tree **result)
{
	struct rtnl_ematch_tree *tree;
	YY_BUFFER_STATE buf = NULL;
	yyscan_t scanner = NULL;
	int err;

	NL_DBG(2, "Parsing ematch expression \"%s\"\n", expr);

	if (!(tree = rtnl_ematch_tree_alloc(RTNL_EMATCH_PROGID)))
		return -NLE_FAILURE;

	if ((err = ematch_lex_init(&scanner)) < 0) {
		err = -NLE_FAILURE;
		goto errout;
	}

	buf = ematch__scan_string(expr, scanner);

	if ((err = ematch_parse(scanner, errp, &tree->et_list)) != 0) {
		/* Buffer must be released before the scanner on this path. */
		ematch__delete_buffer(buf, scanner);
		err = -NLE_PARSE_ERR;
		goto errout;
	}

	ematch_lex_destroy(scanner);
	*result = tree;

	return 0;

errout:
	if (scanner)
		ematch_lex_destroy(scanner);
	rtnl_ematch_tree_free(tree);
	return err;
}

/* Human-readable names for tcf packet layers, indexed by TCF_LAYER_*. */
static const char *layer_txt[] = {
	[TCF_LAYER_LINK] = "eth",
	[TCF_LAYER_NETWORK] = "ip",
	[TCF_LAYER_TRANSPORT] = "tcp",
};

/*
 * Format "<layer>+<offset>" (e.g. "ip+4") into @buf; unknown layers
 * render as "?". Returns @buf.
 */
char *rtnl_ematch_offset2txt(uint8_t layer, uint16_t offset, char *buf,
			     size_t len)
{
	snprintf(buf, len, "%s+%u",
		 (layer <= TCF_LAYER_MAX) ? layer_txt[layer] : "?",
		 offset);
	return buf;
}

/* Comparison-operand symbols, indexed by TCF_EM_OPND_*. */
static const char *operand_txt[] = {
	[TCF_EM_OPND_EQ] = "=",
	[TCF_EM_OPND_LT] = "<",
	[TCF_EM_OPND_GT] = ">",
};

/*
 * Format the operand symbol ("=", "<", ">") into @buf; out-of-range
 * operands render as "?". Returns @buf.
 */
char *rtnl_ematch_opnd2txt(uint8_t opnd, char *buf, size_t len)
{
	snprintf(buf, len, "%s",
		 opnd < ARRAY_SIZE(operand_txt) ? operand_txt[opnd] : "?");
	return buf;
}
static int vlan_put_attrs(struct nl_msg *msg, struct rtnl_link *link) { struct vlan_info *vi = link->l_info; struct nlattr *data; if (!(data = nla_nest_start(msg, IFLA_INFO_DATA))) return nl_errno(ENOBUFS); if (vi->vi_mask & VLAN_HAS_ID) NLA_PUT_U16(msg, IFLA_VLAN_ID, vi->vi_vlan_id); if (vi->vi_mask & VLAN_HAS_FLAGS) { struct ifla_vlan_flags flags = { .flags = vi->vi_flags, .mask = vi->vi_flags_mask, }; NLA_PUT(msg, IFLA_VLAN_FLAGS, sizeof(flags), &flags); } if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) { struct ifla_vlan_qos_mapping map; struct nlattr *qos; int i; if (!(qos = nla_nest_start(msg, IFLA_VLAN_INGRESS_QOS))) goto nla_put_failure; for (i = 0; i <= VLAN_PRIO_MAX; i++) { if (vi->vi_ingress_qos[i]) { map.from = i; map.to = vi->vi_ingress_qos[i]; NLA_PUT(msg, i, sizeof(map), &map); } } nla_nest_end(msg, qos); } if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) { struct ifla_vlan_qos_mapping map; struct nlattr *qos; int i; if (!(qos = nla_nest_start(msg, IFLA_VLAN_EGRESS_QOS))) goto nla_put_failure; for (i = 0; i < vi->vi_negress; i++) { map.from = vi->vi_egress_qos[i].vm_from; map.to = vi->vi_egress_qos[i].vm_to; NLA_PUT(msg, i, sizeof(map), &map); } nla_nest_end(msg, qos); } nla_nest_end(msg, data); nla_put_failure: return 0; } static struct rtnl_link_info_ops vlan_info_ops = { .io_name = "vlan", .io_alloc = vlan_alloc, .io_parse = vlan_parse, .io_dump[NL_DUMP_BRIEF] = vlan_dump_brief, .io_dump[NL_DUMP_FULL] = vlan_dump_full, .io_clone = vlan_clone, .io_put_attrs = vlan_put_attrs, .io_free = vlan_free, }; int rtnl_link_vlan_set_id(struct rtnl_link *link, int id) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_vlan_id = id; vi->vi_mask |= VLAN_HAS_ID; return 0; } int rtnl_link_vlan_get_id(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if 
(vi->vi_mask & VLAN_HAS_ID) return vi->vi_vlan_id; else return 0; } int rtnl_link_vlan_set_flags(struct rtnl_link *link, unsigned int flags) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_flags_mask |= flags; vi->vi_flags |= flags; vi->vi_mask |= VLAN_HAS_FLAGS; return 0; } int rtnl_link_vlan_unset_flags(struct rtnl_link *link, unsigned int flags) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_flags_mask |= flags; vi->vi_flags &= ~flags; vi->vi_mask |= VLAN_HAS_FLAGS; return 0; } unsigned int rtnl_link_vlan_get_flags(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); return vi->vi_flags; } int rtnl_link_vlan_set_ingress_map(struct rtnl_link *link, int from, uint32_t to) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if (from < 0 || from > VLAN_PRIO_MAX) return nl_error(EINVAL, "Invalid vlan prio 0..%d", VLAN_PRIO_MAX); vi->vi_ingress_qos[from] = to; vi->vi_mask |= VLAN_HAS_INGRESS_QOS; return 0; } uint32_t *rtnl_link_vlan_get_ingress_map(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) { nl_error(EOPNOTSUPP, "Not a VLAN link"); return NULL; } if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) return vi->vi_ingress_qos; else return NULL; } int rtnl_link_vlan_set_egress_map(struct rtnl_link *link, uint32_t from, int to) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if (to < 0 || to > VLAN_PRIO_MAX) return nl_error(EINVAL, "Invalid vlan prio 0..%d", VLAN_PRIO_MAX); if 
(vi->vi_negress >= vi->vi_egress_size) { int new_size = vi->vi_egress_size + 32; void *ptr; ptr = realloc(vi->vi_egress_qos, new_size); if (!ptr) return nl_errno(ENOMEM); vi->vi_egress_qos = ptr; vi->vi_egress_size = new_size; } vi->vi_egress_qos[vi->vi_negress].vm_from = from; vi->vi_egress_qos[vi->vi_negress].vm_to = to; vi->vi_negress++; vi->vi_mask |= VLAN_HAS_EGRESS_QOS; return 0; } struct vlan_map *rtnl_link_vlan_get_egress_map(struct rtnl_link *link, int *negress) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) { nl_error(EOPNOTSUPP, "Not a VLAN link"); return NULL; } if (negress == NULL) { nl_error(EINVAL, "Require pointer to store negress"); return NULL; } if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) { *negress = vi->vi_negress; return vi->vi_egress_qos; } else { *negress = 0; return NULL; } } static void __init vlan_init(void) { rtnl_link_register_info(&vlan_info_ops); } static void __exit vlan_exit(void) { rtnl_link_unregister_info(&vlan_info_ops); }
/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	const struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	/* @port is NULL when @dev is the bridge device itself. */
	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	/* Basic link attributes; IFLA_LINK only when iflink differs. */
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev->iflink &&
	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
		goto nla_put_failure;

	/* Per-port protocol info only for RTM_NEWLINK about a port. */
	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if (filter_mask & RTEXT_FILTER_BRVLAN) {
		struct nlattr *af;
		const struct net_port_vlans *pv;
		struct bridge_vlan_info vinfo;
		u16 vid;
		u16 pvid;

		if (port)
			pv = nbp_get_vlan_info(port);
		else
			pv = br_get_vlan_info(br);

		/* Nothing to report: skip the whole AF_SPEC nest. */
		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
			goto done;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af)
			goto nla_put_failure;

		pvid = br_get_pvid(pv);
		/* One IFLA_BRIDGE_VLAN_INFO entry per configured VID,
		 * flagged PVID and/or UNTAGGED as appropriate. */
		for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
		     vid < BR_VLAN_BITMAP_LEN;
		     vid = find_next_bit(pv->vlan_bitmap,
					 BR_VLAN_BITMAP_LEN, vid+1)) {
			vinfo.vid = vid;
			vinfo.flags = 0;
			if (vid == pvid)
				vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
			if (test_bit(vid, pv->untagged_bitmap))
				vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
			if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
				    sizeof(vinfo), &vinfo))
				goto nla_put_failure;
		}
		nla_nest_end(skb, af);
	}

done:
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/*
 * Flush (delete) all actions of one kind in response to RTM_DELACTION.
 *
 * Builds a reply skb by walking all actions of the kind named in
 * @nla's TCA_ACT_KIND attribute with RTM_DELACTION, then sends it via
 * rtnetlink_send(). Returns 0 on success or a negative errno.
 *
 * NOTE(review): @dcb is passed to ops->walk() without being initialized
 * here — confirm walk implementations treat it as write-only/opaque.
 * NOTE(review): tb[TCA_ACT_KIND] may be NULL after parsing; this relies
 * on tc_lookup_action() tolerating a NULL kind — confirm.
 */
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	/* Dummy action used only to resolve and hold the ops/module. */
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		pr_debug("tca_action_flush: couldnt create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	/* tc_lookup_action() takes a module reference on success;
	 * released via out_module_put/module_put below. */
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	/* walk() with RTM_DELACTION both dumps and deletes; 0 means
	 * there was nothing to flush. */
	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto out_module_put;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}
/*
 * Vendor-command handler: return cached gscan batch results to userspace.
 *
 * Waits for batching to finish, takes the batch-results lock, pulls the
 * cached result list from the PNO layer, and packs as many complete
 * scan-id groups as fit into one NLMSG_DEFAULT_SIZE reply. A leading
 * GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE flag tells userspace whether
 * everything fit (1) or it should ask again (0).
 *
 * Returns 0/positive from cfg80211_vendor_cmd_reply() on success, or a
 * negative errno on failure.
 */
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
	struct wireless_dev *wdev, const void *data, int len)
{
	int err = 0;
	struct wl_priv *cfg = wiphy_priv(wiphy);
	gscan_results_cache_t *results, *iter;
	uint32 reply_len, complete = 1;
	int32 mem_needed, num_results_iter;
	wifi_gscan_result_t *ptr;
	uint16 num_scan_ids, num_results;
	struct sk_buff *skb;
	struct nlattr *scan_hdr, *complete_flag;

	err = dhd_dev_wait_batch_results_complete(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK)
		return -EBUSY;

	err = dhd_dev_pno_lock_access_batch_results(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK) {
		WL_ERR(("Can't obtain lock to access batch results %d\n", err));
		return -EBUSY;
	}
	results = dhd_dev_pno_get_gscan(wl_to_prmry_ndev(cfg),
	             DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);

	if (!results) {
		/* No cached data: send an empty reply and release the lock. */
		WL_ERR(("No results to send %d\n", err));
		err =  wl_cfgvendor_send_cmd_reply(wiphy, wl_to_prmry_ndev(cfg),
		        results, 0);

		if (unlikely(err))
			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return err;
	}
	/* reply_len packs two counts: low 16 bits = scan ids,
	 * high 16 bits = total results. */
	num_scan_ids = reply_len & 0xFFFF;
	num_results = (reply_len & 0xFFFF0000) >> 16;
	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
	             (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
	             VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;

	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
		complete = 0;
	}

	WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed,
		(int)NLMSG_DEFAULT_SIZE));
	/* Alloc the SKB for vendor_event */
	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
	if (unlikely(!skb)) {
		WL_ERR(("skb alloc failed"));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return -ENOMEM;
	}
	iter = results;
	/* Reserve the completion flag now; it is filled in at the end once
	 * we know whether everything fit.
	 * NOTE(review): nla_reserve() result is not NULL-checked before the
	 * memcpy below — confirm the mem_needed accounting guarantees room. */
	complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
	                    sizeof(complete));
	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN +
	                VENDOR_REPLY_OVERHEAD);

	/* Emit one nested SCAN_RESULTS group per cache entry until the
	 * budget cannot hold the next entry in full. */
	while (iter) {
		num_results_iter =
		    (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t);
		/* Stop if this group would not fit whole. */
		if (num_results_iter <= 0 ||
		    ((iter->tot_count - iter->tot_consumed) > num_results_iter))
			break;
		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
		/* no more room? we are done then (for now) */
		if (scan_hdr == NULL) {
			complete = 0;
			break;
		}
		/* NOTE(review): nla_put* return values below are ignored —
		 * presumably safe given the size accounting; confirm. */
		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
		num_results_iter = iter->tot_count - iter->tot_consumed;

		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
		if (num_results_iter) {
			ptr = &iter->results[iter->tot_consumed];
			iter->tot_consumed += num_results_iter;
			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
			 num_results_iter * sizeof(wifi_gscan_result_t), ptr);
		}
		nla_nest_end(skb, scan_hdr);
		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
		    (num_results_iter * sizeof(wifi_gscan_result_t));
		iter = iter->next;
	}
	memcpy(nla_data(complete_flag), &complete, sizeof(complete));
	dhd_dev_gscan_batch_cache_cleanup(wl_to_prmry_ndev(cfg));
	dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
	return cfg80211_vendor_cmd_reply(skb);
}
/*
 * Dump the CHOKe qdisc configuration as a TCA_OPTIONS nest containing
 * the RED-style parameter block and the max_P probability.
 */
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	/* nla_nest_cancel() is NULL-tolerant for the pre-nest failure. */
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

/* Copy the CHOKe drop/mark counters out as tc_choke_xstats. */
static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

/* Release the packet ring buffer on qdisc teardown. */
static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

/* Peek at the head packet of the ring without dequeuing; NULL if empty. */
static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		= "choke",
	.priv_size	= sizeof(struct choke_sched_data),
	.enqueue	= choke_enqueue,
	.dequeue	= choke_dequeue,
	.peek		= choke_peek_head,
	.init		= choke_init,
	.destroy	= choke_destroy,
	.reset		= choke_reset,
	.change		= choke_change,
	.dump		= choke_dump,
	.dump_stats	= choke_dump_stats,
	.owner		= THIS_MODULE,
};

/* Module plumbing: register/unregister the qdisc with the tc core. */
static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
/*
 * Create a vlan-family link (macvlan/ipvlan/...) on top of @master via
 * RTM_NEWLINK.
 *
 * @vlantype: link kind ("macvlan" or "ipvlan")
 * @master:   name of the lower device
 * @type:     mode value (IFLA_MACVLAN_MODE / IFLA_IPVLAN_MODE)
 * @hw:       optional MAC address string ("" to skip)
 * @mtu:      optional MTU (<= 0 to skip)
 *
 * On success re-Loads this link object; on failure returns the error and
 * frees the partially built message.
 */
TError TNlLink::AddXVlan(const std::string &vlantype,
                         const std::string &master,
                         uint32_t type,
                         const std::string &hw,
                         int mtu) {
    TError error = TError::Success();
    int ret;
    uint32_t masterIdx;
    struct nl_msg *msg;
    struct nlattr *linkinfo, *infodata;
    struct ifinfomsg ifi = { 0 };
    struct ether_addr *ea = nullptr;
    auto Name = GetName();

    if (hw.length()) {
        // FIXME THREADS — ether_aton() returns a pointer to static storage
        ea = ether_aton(hw.c_str());
        if (!ea)
            return TError(EError::Unknown, "Invalid " + vlantype + " mac address " + hw);
    }

    TNlLink masterLink(Nl, master);
    error = masterLink.Load();
    if (error)
        return error;
    masterIdx = masterLink.GetIndex();

    msg = nlmsg_alloc_simple(RTM_NEWLINK, NLM_F_CREATE);
    if (!msg)
        return TError(EError::Unknown, "Unable to add " + vlantype + ": no memory");

    ret = nlmsg_append(msg, &ifi, sizeof(ifi), NLMSG_ALIGNTO);
    if (ret < 0) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": " + nl_geterror(ret));
        goto free_msg;
    }

    /* link configuration */
    ret = nla_put(msg, IFLA_LINK, sizeof(uint32_t), &masterIdx);
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_LINK: ") + nl_geterror(ret));
        goto free_msg;
    }
    ret = nla_put(msg, IFLA_IFNAME, Name.length() + 1, Name.c_str());
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_IFNAME: ") + nl_geterror(ret));
        goto free_msg;
    }

    if (mtu > 0) {
        ret = nla_put(msg, IFLA_MTU, sizeof(int), &mtu);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_MTU: ") + nl_geterror(ret));
            goto free_msg;
        }
    }

    if (ea) {
        struct nl_addr *addr = nl_addr_build(AF_LLC, ea, ETH_ALEN);
        /* BUGFIX: nl_addr_build() can fail; the old code dereferenced the
         * result unchecked and leaked it when nla_put() failed (nl_addr_put
         * was only reached on the success path). */
        if (!addr) {
            error = TError(EError::Unknown, "Unable to add " + vlantype + ": can't build hw address");
            goto free_msg;
        }
        ret = nla_put(msg, IFLA_ADDRESS, nl_addr_get_len(addr), nl_addr_get_binary_addr(addr));
        nl_addr_put(addr);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_ADDRESS: ") + nl_geterror(ret));
            goto free_msg;
        }
    }

    /* link type */
    linkinfo = nla_nest_start(msg, IFLA_LINKINFO);
    if (!linkinfo) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": can't nest IFLA_LINKINFO");
        goto free_msg;
    }
    ret = nla_put(msg, IFLA_INFO_KIND, vlantype.length() + 1, vlantype.c_str());
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to put IFLA_INFO_KIND: ") + nl_geterror(ret));
        goto free_msg;
    }

    /* xvlan specific */
    infodata = nla_nest_start(msg, IFLA_INFO_DATA);
    if (!infodata) {
        error = TError(EError::Unknown, "Unable to add " + vlantype + ": can't nest IFLA_INFO_DATA");
        goto free_msg;
    }

    if (vlantype == "macvlan") {
        ret = nla_put(msg, IFLA_MACVLAN_MODE, sizeof(uint32_t), &type);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_MACVLAN_MODE: ") + nl_geterror(ret));
            goto free_msg;
        }
#ifdef IFLA_IPVLAN_MAX
    } else if (vlantype == "ipvlan") {
        /* ipvlan mode is a u16, unlike macvlan's u32. */
        uint16_t mode = type;
        ret = nla_put(msg, IFLA_IPVLAN_MODE, sizeof(uint16_t), &mode);
        if (ret < 0) {
            error = TError(EError::Unknown, std::string("Unable to put IFLA_IPVLAN_MODE: ") + nl_geterror(ret));
            goto free_msg;
        }
#endif
    }
    nla_nest_end(msg, infodata);
    nla_nest_end(msg, linkinfo);

    L() << "netlink: add " << vlantype << " " << Name << " master " << master
        << " type " << type << " hw " << hw << " mtu " << mtu << std::endl;

    /* nl_send_sync() consumes msg regardless of outcome. */
    ret = nl_send_sync(GetSock(), msg);
    if (ret)
        return Error(ret, "Cannot add " + vlantype);

    return Load();

free_msg:
    nlmsg_free(msg);
    return error;
}
/*
 * Serialize the VLAN attributes of @link into @msg under IFLA_INFO_DATA:
 * VLAN id, flags and the ingress/egress QoS maps.
 *
 * NOTE(review): the NLA_PUT* macros jump to nla_put_failure on overflow,
 * and that label returns 0 — success falls through the same label, so a
 * truncated nest is also reported as success; confirm this is intended
 * upstream behavior before changing it.
 */
static int vlan_put_attrs(struct nl_msg *msg, struct rtnl_link *link)
{
	struct vlan_info *vi = link->l_info;
	struct nlattr *data;

	if (!(data = nla_nest_start(msg, IFLA_INFO_DATA)))
		return -NLE_MSGSIZE;

	if (vi->vi_mask & VLAN_HAS_ID)
		NLA_PUT_U16(msg, IFLA_VLAN_ID, vi->vi_vlan_id);

	if (vi->vi_mask & VLAN_HAS_FLAGS) {
		struct ifla_vlan_flags flags = {
			.flags = vi->vi_flags,
			.mask = vi->vi_flags_mask,
		};

		NLA_PUT(msg, IFLA_VLAN_FLAGS, sizeof(flags), &flags);
	}

	if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) {
		struct ifla_vlan_qos_mapping map;
		struct nlattr *qos;
		int i;

		if (!(qos = nla_nest_start(msg, IFLA_VLAN_INGRESS_QOS)))
			goto nla_put_failure;

		/* Only non-zero priority mappings are transmitted,
		 * keyed by their priority index. */
		for (i = 0; i <= VLAN_PRIO_MAX; i++) {
			if (vi->vi_ingress_qos[i]) {
				map.from = i;
				map.to = vi->vi_ingress_qos[i];
				NLA_PUT(msg, i, sizeof(map), &map);
			}
		}

		nla_nest_end(msg, qos);
	}

	if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) {
		struct ifla_vlan_qos_mapping map;
		struct nlattr *qos;
		int i;

		if (!(qos = nla_nest_start(msg, IFLA_VLAN_EGRESS_QOS)))
			goto nla_put_failure;

		for (i = 0; i < vi->vi_negress; i++) {
			map.from = vi->vi_egress_qos[i].vm_from;
			map.to = vi->vi_egress_qos[i].vm_to;
			NLA_PUT(msg, i, sizeof(map), &map);
		}

		nla_nest_end(msg, qos);
	}

	nla_nest_end(msg, data);
nla_put_failure:
	return 0;
}

/* Link-info ops for "vlan" links (initializer continues past this chunk). */
static struct rtnl_link_info_ops vlan_info_ops = {
	.io_name		= "vlan",
	.io_alloc		= vlan_alloc,
	.io_parse		= vlan_parse,
	.io_dump = {
	    [NL_DUMP_LINE]	= vlan_dump_line,
	    [NL_DUMP_DETAILS]	= vlan_dump_details,
	},
	.io_clone		= vlan_clone,
/*
 * iw command handler: build the NL80211_ATTR_WOWLAN_TRIGGERS nest from
 * the command-line trigger list.
 *
 * Two parse states: PS_REG consumes named triggers ("any", "disconnect",
 * "tcp <file>", "patterns", ...); after "patterns", PS_PAT consumes
 * "[offset+]hexpattern" arguments which are accumulated in a separate
 * @patterns message and attached at the end as a nested attribute.
 *
 * Returns 0 on success, 1 on a usage error, or a negative errno.
 */
static int handle_wowlan_enable(struct nl80211_state *state, struct nl_msg *msg,
				int argc, char **argv, enum id_input id)
{
	struct nlattr *wowlan, *pattern;
	struct nl_msg *patterns = NULL;
	enum {
		PS_REG,
		PS_PAT,
	} parse_state = PS_REG;
	int err = -ENOBUFS;
	unsigned char *pat, *mask;
	size_t patlen;
	int patnum = 0, pkt_offset;
	char *eptr, *value1, *value2, *sptr = NULL;

	wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
	if (!wowlan)
		return -ENOBUFS;

	while (argc) {
		switch (parse_state) {
		case PS_REG:
			if (strcmp(argv[0], "any") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
			else if (strcmp(argv[0], "disconnect") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
			else if (strcmp(argv[0], "magic-packet") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
			else if (strcmp(argv[0], "gtk-rekey-failure") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
			else if (strcmp(argv[0], "eap-identity-request") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
			else if (strcmp(argv[0], "4way-handshake") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
			else if (strcmp(argv[0], "rfkill-release") == 0)
				NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
			else if (strcmp(argv[0], "tcp") == 0) {
				/* "tcp" takes a config-file argument. */
				argv++;
				argc--;
				if (!argc) {
					err = 1;
					goto nla_put_failure;
				}
				err = wowlan_parse_tcp_file(msg, argv[0]);
				if (err)
					goto nla_put_failure;
			} else if (strcmp(argv[0], "patterns") == 0) {
				/* Remaining args are patterns, collected in
				 * their own message and nested at the end. */
				parse_state = PS_PAT;
				patterns = nlmsg_alloc();
				if (!patterns) {
					err = -ENOMEM;
					goto nla_put_failure;
				}
			} else if (strcmp(argv[0], "net-detect") == 0) {
				argv++;
				argc--;
				if (!argc) {
					err = 1;
					goto nla_put_failure;
				}
				err = wowlan_parse_net_detect(msg, &argc, &argv);
				if (err)
					goto nla_put_failure;
				/* parser already advanced argc/argv */
				continue;
			} else {
				err = 1;
				goto nla_put_failure;
			}
			break;
		case PS_PAT:
			/* Pattern syntax: "[pkt_offset+]hex-with-mask". */
			value1 = strtok_r(argv[0], "+", &sptr);
			value2 = strtok_r(NULL, "+", &sptr);

			if (!value2) {
				pkt_offset = 0;
				value2 = value1;
			} else {
				pkt_offset = strtoul(value1, &eptr, 10);
				if (eptr != value1 + strlen(value1)) {
					err = 1;
					goto nla_put_failure;
				}
			}

			if (parse_hex_mask(value2, &pat, &patlen, &mask)) {
				err = 1;
				goto nla_put_failure;
			}

			/* NOTE(review): nla_nest_start() result is not
			 * checked here before the NLA_PUTs — confirm
			 * patterns cannot overflow in practice. */
			pattern = nla_nest_start(patterns, ++patnum);
			NLA_PUT(patterns, NL80211_PKTPAT_MASK,
				DIV_ROUND_UP(patlen, 8), mask);
			NLA_PUT(patterns, NL80211_PKTPAT_PATTERN, patlen, pat);
			NLA_PUT_U32(patterns, NL80211_PKTPAT_OFFSET,
				    pkt_offset);
			nla_nest_end(patterns, pattern);
			free(mask);
			free(pat);
			break;
		}
		argv++;
		argc--;
	}

	if (patterns)
		nla_put_nested(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
			       patterns);

	nla_nest_end(msg, wowlan);
	err = 0;
nla_put_failure:
	nlmsg_free(patterns);
	return err;
}
/*
 * Fetch the destination (real server) list of @svc.
 *
 * Two transports: the generic-netlink path (when try_nl is set) asks the
 * kernel with IPVS_CMD_GET_DEST and fills @d via ipvs_dests_parse_cb();
 * the legacy path uses the IP_VS_SO_GET_DESTS sockopt (IPv4 only) and
 * widens the kernel layout into the af-aware userspace layout.
 *
 * Returns a malloc'd ip_vs_get_dests the caller must free, or NULL on
 * error.
 */
struct ip_vs_get_dests *ipvs_get_dests(ipvs_service_entry_t *svc)
{
	struct ip_vs_get_dests *d;
	struct ip_vs_get_dests_kern *dk;
	socklen_t len;
	int i;

	len = sizeof(*d) + sizeof(ipvs_dest_entry_t) * svc->num_dests;
	if (!(d = malloc(len)))
		return NULL;

	ipvs_func = ipvs_get_dests;

#ifdef LIBIPVS_USE_NL
	if (try_nl) {
		struct nl_msg *msg;
		struct nlattr *nl_service;

		/* Ensure room for at least one entry when num_dests == 0.
		 * FIXME(review): realloc() result is assigned straight to d
		 * with no NULL check — on failure this leaks the old block
		 * and dereferences NULL on the next line. */
		if (svc->num_dests == 0)
			d = realloc(d,sizeof(*d) + sizeof(ipvs_dest_entry_t));

		d->fwmark = svc->fwmark;
		d->protocol  = svc->protocol;
		d->addr = svc->addr;
		d->port = svc->port;
		d->num_dests = svc->num_dests;
		d->af = svc->af;

		msg = ipvs_nl_message(IPVS_CMD_GET_DEST, NLM_F_DUMP);
		if (!msg)
			goto ipvs_nl_dest_failure;

		/* Service selector: fwmark alone, or proto/addr/port. */
		nl_service = nla_nest_start(msg, IPVS_CMD_ATTR_SERVICE);
		if (!nl_service)
			goto nla_put_failure;

		NLA_PUT_U16(msg, IPVS_SVC_ATTR_AF, svc->af);

		if (svc->fwmark) {
			NLA_PUT_U32(msg, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
		} else {
			NLA_PUT_U16(msg, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
			NLA_PUT(msg, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr),
				&svc->addr);
			NLA_PUT_U16(msg, IPVS_SVC_ATTR_PORT, svc->port);
		}

		nla_nest_end(msg, nl_service);
		/* Callback may grow/replace *d, hence &d. */
		if (ipvs_nl_send_message(msg, ipvs_dests_parse_cb, &d))
			goto ipvs_nl_dest_failure;

		return d;

nla_put_failure:
		nlmsg_free(msg);
ipvs_nl_dest_failure:
		free(d);
		return NULL;
	}
#endif

	/* Legacy sockopt path supports IPv4 only. */
	if (svc->af != AF_INET) {
	  errno = EAFNOSUPPORT;
	  free(d);
	  return NULL;
	}

	len = sizeof(*dk) + sizeof(struct ip_vs_dest_entry_kern) * svc->num_dests;
	if (!(dk = malloc(len))) {
		free(d);
		return NULL;
	}

	dk->fwmark = svc->fwmark;
	dk->protocol = svc->protocol;
	dk->addr = svc->addr.ip;
	dk->port = svc->port;
	dk->num_dests = svc->num_dests;

	if (getsockopt(sockfd, IPPROTO_IP,
		       IP_VS_SO_GET_DESTS, dk, &len) < 0) {
		free(d);
		free(dk);
		return NULL;
	}
	/* Widen kernel (v4-only) records into the af-aware layout. */
	memcpy(d, dk, sizeof(struct ip_vs_get_dests_kern));
	d->af = AF_INET;
	d->addr.ip = d->__addr_v4;
	for (i = 0; i < dk->num_dests; i++) {
		memcpy(&d->entrytable[i], &dk->entrytable[i],
		       sizeof(struct ip_vs_dest_entry_kern));
		d->entrytable[i].af = AF_INET;
		d->entrytable[i].addr.ip = d->entrytable[i].__addr_v4;
	}
	free(dk);
	return d;
}
/*
 * Parse a WoWLAN TCP-connection config file (@fn) and append the result
 * as a NL80211_WOWLAN_TRIG_TCP_CONNECTION nest to @msg.
 *
 * Recognized "key=value" lines: source=, dest=, data=, data.interval=,
 * wake=, data.seq=, data.tok=; lines starting with '#' are comments.
 *
 * Returns 0 on success, 1 on a malformed file, -ENOMEM / -ENOBUFS on
 * resource failure. Note the nest is closed on every exit path,
 * including errors (the caller discards the message on failure).
 */
static int wowlan_parse_tcp_file(struct nl_msg *msg, const char *fn)
{
	char buf[16768];
	int err = 1;
	FILE *f = fopen(fn, "r");
	struct nlattr *tcp;

	if (!f)
		return 1;

	tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
	if (!tcp)
		goto nla_put_failure;

	while (!feof(f)) {
		char *eol;

		if (!fgets(buf, sizeof(buf), f))
			break;

		/* Strip CR/LF. NOTE(review): searching from buf + 5 assumes
		 * every line has at least a 5-char key prefix — confirm. */
		eol = strchr(buf + 5, '\r');
		if (eol)
			*eol = 0;
		eol = strchr(buf + 5, '\n');
		if (eol)
			*eol = 0;

		if (strncmp(buf, "source=", 7) == 0) {
			/* source=<ip>[:<port>] */
			struct in_addr in_addr;
			char *addr = buf + 7;
			char *port = strchr(buf + 7, ':');

			if (port) {
				*port = 0;
				port++;
			}
			if (inet_aton(addr, &in_addr) == 0)
				goto close;
			NLA_PUT_U32(msg, NL80211_WOWLAN_TCP_SRC_IPV4,
				    in_addr.s_addr);
			if (port)
				NLA_PUT_U16(msg, NL80211_WOWLAN_TCP_SRC_PORT,
					    atoi(port));
		} else if (strncmp(buf, "dest=", 5) == 0) {
			/* dest=<ip>:<port>@<mac> — all parts required. */
			struct in_addr in_addr;
			char *addr = buf + 5;
			char *port = strchr(buf + 5, ':');
			char *mac;
			unsigned char macbuf[6];

			if (!port)
				goto close;
			*port = 0;
			port++;
			mac = strchr(port, '@');
			if (!mac)
				goto close;
			*mac = 0;
			mac++;
			if (inet_aton(addr, &in_addr) == 0)
				goto close;
			NLA_PUT_U32(msg, NL80211_WOWLAN_TCP_DST_IPV4,
				    in_addr.s_addr);
			NLA_PUT_U16(msg, NL80211_WOWLAN_TCP_DST_PORT,
				    atoi(port));
			if (mac_addr_a2n(macbuf, mac))
				goto close;
			NLA_PUT(msg, NL80211_WOWLAN_TCP_DST_MAC,
				6, macbuf);
		} else if (strncmp(buf, "data=", 5) == 0) {
			size_t len;
			unsigned char *pkt = parse_hex(buf + 5, &len);

			if (!pkt)
				goto close;
			NLA_PUT(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, len, pkt);
			free(pkt);
		} else if (strncmp(buf, "data.interval=", 14) == 0) {
			NLA_PUT_U32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
				    atoi(buf + 14));
		} else if (strncmp(buf, "wake=", 5) == 0) {
			/* wake=<hex-with-mask> */
			unsigned char *pat, *mask;
			size_t patlen;

			if (parse_hex_mask(buf + 5, &pat, &patlen, &mask))
				goto close;
			NLA_PUT(msg, NL80211_WOWLAN_TCP_WAKE_MASK,
				DIV_ROUND_UP(patlen, 8), mask);
			NLA_PUT(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
				patlen, pat);
			free(mask);
			free(pat);
		} else if (strncmp(buf, "data.seq=", 9) == 0) {
			/* data.seq=<len>,<offset>[,<start>] */
			struct nl80211_wowlan_tcp_data_seq seq = {};
			char *len, *offs, *start;

			len = buf + 9;
			offs = strchr(len, ',');
			if (!offs)
				goto close;
			*offs = 0;
			offs++;
			start = strchr(offs, ',');
			if (start) {
				*start = 0;
				start++;
				seq.start = atoi(start);
			}
			seq.len = atoi(len);
			seq.offset = atoi(offs);
			NLA_PUT(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ,
				sizeof(seq), &seq);
		} else if (strncmp(buf, "data.tok=", 9) == 0) {
			/* data.tok=<len>,<offset>,<hex token stream> */
			struct nl80211_wowlan_tcp_data_token *tok;
			size_t stream_len;
			char *len, *offs, *toks;
			unsigned char *stream;

			len = buf + 9;
			offs = strchr(len, ',');
			if (!offs)
				goto close;
			*offs = 0;
			offs++;
			toks = strchr(offs, ',');
			if (!toks)
				goto close;
			*toks = 0;
			toks++;
			stream = parse_hex(toks, &stream_len);
			if (!stream)
				goto close;
			tok = malloc(sizeof(*tok) + stream_len);
			if (!tok) {
				free(stream);
				err = -ENOMEM;
				goto close;
			}
			tok->len = atoi(len);
			tok->offset = atoi(offs);
			memcpy(tok->token_stream, stream, stream_len);
			NLA_PUT(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
				sizeof(*tok) + stream_len, tok);
			free(stream);
			free(tok);
		} else {
			if (buf[0] == '#')
				continue;
			goto close;
		}
	}

	err = 0;
	goto close;
nla_put_failure:
	err = -ENOBUFS;
close:
	fclose(f);
	nla_nest_end(msg, tcp);
	return err;
}
/*
 * l2tp_nl_session_send - fill a L2TP_CMD_SESSION_GET netlink reply for
 * @session into @skb.
 *
 * Emits session/tunnel identifiers, configuration attributes and a nested
 * L2TP_ATTR_STATS block whose counters are sampled consistently under the
 * u64 stats seqcount.  Returns the final message length on success or a
 * negative errno on failure.
 */
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq,
				int flags, struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;
	struct l2tp_stats stats;
	unsigned int start;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_SESSION_GET);
	/*
	 * genlmsg_put() returns NULL on failure, never an ERR_PTR value;
	 * the original IS_ERR()/PTR_ERR() check could never trigger and a
	 * NULL header would have been dereferenced below.
	 */
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
			session->peer_session_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
	    (session->mru &&
	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
		goto nla_put_failure;

	if ((session->ifname && session->ifname[0] &&
	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
	    (session->cookie_len &&
	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
		     &session->cookie[0])) ||
	    (session->peer_cookie_len &&
	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
		     &session->peer_cookie[0])) ||
	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
#ifdef CONFIG_XFRM
	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
#endif
	    (session->reorder_timeout &&
	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
			   session->reorder_timeout)))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	/* sample the counters atomically w.r.t. writers */
	do {
		start = u64_stats_fetch_begin(&session->stats.syncp);
		stats.tx_packets = session->stats.tx_packets;
		stats.tx_bytes = session->stats.tx_bytes;
		stats.tx_errors = session->stats.tx_errors;
		stats.rx_packets = session->stats.rx_packets;
		stats.rx_bytes = session->stats.rx_bytes;
		stats.rx_errors = session->stats.rx_errors;
		stats.rx_seq_discards = session->stats.rx_seq_discards;
		stats.rx_oos_packets = session->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&session->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

 nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct l2tp_tunnel *tunnel) { void *hdr; struct nlattr *nest; struct sock *sk = NULL; struct inet_sock *inet; #if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *np = NULL; #endif struct l2tp_stats stats; unsigned int start; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) || nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap)) goto nla_put_failure; nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; do { start = u64_stats_fetch_begin(&tunnel->stats.syncp); stats.tx_packets = tunnel->stats.tx_packets; stats.tx_bytes = tunnel->stats.tx_bytes; stats.tx_errors = tunnel->stats.tx_errors; stats.rx_packets = tunnel->stats.rx_packets; stats.rx_bytes = tunnel->stats.rx_bytes; stats.rx_errors = tunnel->stats.rx_errors; stats.rx_seq_discards = tunnel->stats.rx_seq_discards; stats.rx_oos_packets = tunnel->stats.rx_oos_packets; } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, stats.rx_seq_discards) || nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, stats.rx_oos_packets) || nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) goto nla_put_failure; nla_nest_end(skb, nest); sk = tunnel->sock; if (!sk) goto out; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) np = inet6_sk(sk); #endif inet = inet_sk(sk); switch 
(tunnel->encap) { case L2TP_ENCAPTYPE_UDP: if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) || nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT))) goto nla_put_failure; /* NOBREAK */ case L2TP_ENCAPTYPE_IP: #if IS_ENABLED(CONFIG_IPV6) if (np) { if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr), &np->saddr) || nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr), &np->daddr)) goto nla_put_failure; } else #endif if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) || nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr)) goto nla_put_failure; break; } out: return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -1; }
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; struct sk_buff *nskb = NULL; struct sk_buff *user_skb; /* to be queued to userspace */ struct nlattr *nla; unsigned int len; int err; if (vlan_tx_tag_present(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); if (!nskb) return -ENOMEM; nskb->vlan_tci = 0; skb = nskb; } if (nla_attr_size(skb->len) > USHRT_MAX) { err = -EFBIG; goto out; } len = sizeof(struct ovs_header); len += nla_total_size(skb->len); len += nla_total_size(FLOW_BUFSIZE); if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) len += nla_total_size(8); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; goto out; } upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd); upcall->dp_ifindex = dp_ifindex; nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); ovs_flow_to_nlattrs(upcall_info->key, user_skb); nla_nest_end(user_skb, nla); if (upcall_info->userdata) nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, nla_get_u64(upcall_info->userdata)); nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); skb_copy_and_csum_dev(skb, nla_data(nla)); err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); out: kfree_skb(nskb); return err; }
/*
 * wl_cfgvendor_gscan_get_batch_results - vendor command handler that
 * returns cached gscan batch results to userspace.
 *
 * Waits for batch collection to complete, fetches the cached result list
 * under the PNO batch-results lock, sizes the reply (capped at
 * NLMSG_DEFAULT_SIZE, with "complete" cleared when truncated) and packs
 * per-scan-id nests of wifi_gscan_result_t records, consuming entries
 * from the cache as it goes.
 *
 * NOTE(review): the nla_put*() calls below ignore their return values --
 * if the reply skb ever runs out of tailroom the message is silently
 * truncated; confirm mem_needed always bounds the puts.  Code is kept
 * byte-identical; this header comment is the only addition.
 */
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { int err = 0; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); gscan_results_cache_t *results, *iter; uint32 reply_len, complete = 0, num_results_iter; int32 mem_needed; wifi_gscan_result_t *ptr; uint16 num_scan_ids, num_results; struct sk_buff *skb; struct nlattr *scan_hdr; dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg)); dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len); if (!results) { WL_ERR(("No results to send %d\n", err)); err = rtw_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), results, 0); if (unlikely(err)) WL_ERR(("Vendor Command reply failed ret:%d \n", err)); dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); return err; } num_scan_ids = reply_len & 0xFFFF; num_results = (reply_len & 0xFFFF0000) >> 16; mem_needed = (num_results * sizeof(wifi_gscan_result_t)) + (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) + VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN; if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) { mem_needed = (int32)NLMSG_DEFAULT_SIZE; complete = 0; } else { complete = 1; } WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed, (int)NLMSG_DEFAULT_SIZE)); /* Alloc the SKB for vendor_event */ skb = rtw_cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); if (unlikely(!skb)) { WL_ERR(("skb alloc failed")); dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); return -ENOMEM; } iter = results; nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete); mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD); while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) > 0)) { scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS); nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id); 
nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag); num_results_iter = (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t); if ((iter->tot_count - iter->tot_consumed) < num_results_iter) num_results_iter = iter->tot_count - iter->tot_consumed; nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter); if (num_results_iter) { ptr = &iter->results[iter->tot_consumed]; iter->tot_consumed += num_results_iter; nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, num_results_iter * sizeof(wifi_gscan_result_t), ptr); } nla_nest_end(skb, scan_hdr); mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN + (num_results_iter * sizeof(wifi_gscan_result_t)); iter = iter->next; } dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg)); dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); return rtw_cfg80211_vendor_cmd_reply(skb); }
/*
 * GRED qdisc dump path plus teardown and registration boilerplate.
 *
 * gred_dump() emits, inside TCA_OPTIONS: the table-wide sopt
 * (TCA_GRED_DPS), the max_P array, the limit, the legacy all-in-one
 * TCA_GRED_PARMS table (one tc_gred_qopt per DP slot; empty slots are
 * flagged with DP >= MAX_DPs), and finally the newer structured
 * TCA_GRED_VQ_LIST nest with per-virtual-queue config and stats.  On any
 * put failure the whole TCA_OPTIONS nest is cancelled and -EMSGSIZE
 * returned.  gred_destroy() releases all virtual queues and notifies
 * offload; the remaining code is the Qdisc_ops table and module
 * init/exit.  Code is kept byte-identical; comments only.
 */
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) { struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *vqs, *opts = NULL; int i; u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, .grio = gred_rio_mode(table), .flags = table->red_flags, }; if (gred_offload_dump_stats(sch)) goto nla_put_failure; opts = nla_nest_start_noflag(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt)) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? q->parms.max_P : 0; } if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) goto nla_put_failure; /* Old style all-in-one dump of VQs */ parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct tc_gred_qopt opt; unsigned long qavg; memset(&opt, 0, sizeof(opt)); if (!q) { /* hack -- fix at some point with proper message This is how we indicate to tc that there is no VQ at this DP */ opt.DP = MAX_DPs + i; goto append_opt; } opt.limit = q->limit; opt.DP = q->DP; opt.backlog = gred_backlog(table, q, sch); opt.prio = q->prio; opt.qth_min = q->parms.qth_min >> q->parms.Wlog; opt.qth_max = q->parms.qth_max >> q->parms.Wlog; opt.Wlog = q->parms.Wlog; opt.Plog = q->parms.Plog; opt.Scell_log = q->parms.Scell_log; opt.other = q->stats.other; opt.early = q->stats.prob_drop; opt.forced = q->stats.forced_drop; opt.pdrop = q->stats.pdrop; opt.packets = q->packetsin; opt.bytesin = q->bytesin; if (gred_wred_mode(table)) gred_load_wred_set(table, q); qavg = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg >> q->parms.Wlog); opt.qave = qavg >> q->parms.Wlog; append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) goto nla_put_failure; } nla_nest_end(skb, parms); /* Dump the VQs again, in more 
structured way */ vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST); if (!vqs) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct nlattr *vq; if (!q) continue; vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY); if (!vq) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags)) goto nla_put_failure; /* Stats */ if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin, TCA_GRED_VQ_PAD)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG, gred_backlog(table, q, sch))) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP, q->stats.prob_drop)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK, q->stats.prob_mark)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP, q->stats.forced_drop)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK, q->stats.forced_mark)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other)) goto nla_put_failure; nla_nest_end(skb, vq); } nla_nest_end(skb, vqs); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static void gred_destroy(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); int i; for (i = 0; i < table->DPs; i++) { if (table->tab[i]) gred_destroy_vq(table->tab[i]); } gred_offload(sch, TC_GRED_DESTROY); } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { .id = "gred", .priv_size = sizeof(struct gred_sched), .enqueue = gred_enqueue, .dequeue = gred_dequeue, .peek = qdisc_peek_head, .init = gred_init, .reset = gred_reset, .destroy = gred_destroy, .change = gred_change, .dump = gred_dump, .owner = THIS_MODULE, }; static int __init 
gred_module_init(void) { return register_qdisc(&gred_qdisc_ops); } static void __exit gred_module_exit(void) { unregister_qdisc(&gred_qdisc_ops); } module_init(gred_module_init) module_exit(gred_module_exit) MODULE_LICENSE("GPL");
static int nfnl_ct_build_tuple(struct nl_msg *msg, const struct nfnl_ct *ct, int repl) { struct nlattr *tuple, *ip, *proto; struct nl_addr *addr; int family; family = nfnl_ct_get_family(ct); tuple = nla_nest_start(msg, repl ? CTA_TUPLE_REPLY : CTA_TUPLE_ORIG); if (!tuple) goto nla_put_failure; ip = nla_nest_start(msg, CTA_TUPLE_IP); if (!ip) goto nla_put_failure; addr = nfnl_ct_get_src(ct, repl); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_SRC : CTA_IP_V6_SRC, addr); addr = nfnl_ct_get_dst(ct, repl); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_DST : CTA_IP_V6_DST, addr); nla_nest_end(msg, ip); proto = nla_nest_start(msg, CTA_TUPLE_PROTO); if (!proto) goto nla_put_failure; if (nfnl_ct_test_proto(ct)) NLA_PUT_U8(msg, CTA_PROTO_NUM, nfnl_ct_get_proto(ct)); if (nfnl_ct_test_src_port(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_SRC_PORT, htons(nfnl_ct_get_src_port(ct, repl))); if (nfnl_ct_test_dst_port(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_DST_PORT, htons(nfnl_ct_get_dst_port(ct, repl))); if (family == AF_INET) { if (nfnl_ct_test_icmp_id(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_ICMP_ID, htons(nfnl_ct_get_icmp_id(ct, repl))); if (nfnl_ct_test_icmp_type(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMP_TYPE, nfnl_ct_get_icmp_type(ct, repl)); if (nfnl_ct_test_icmp_code(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMP_CODE, nfnl_ct_get_icmp_code(ct, repl)); } else if (family == AF_INET6) { if (nfnl_ct_test_icmp_id(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_ICMPV6_ID, htons(nfnl_ct_get_icmp_id(ct, repl))); if (nfnl_ct_test_icmp_type(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMPV6_TYPE, nfnl_ct_get_icmp_type(ct, repl)); if (nfnl_ct_test_icmp_code(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMPV6_CODE, nfnl_ct_get_icmp_code(ct, repl)); } nla_nest_end(msg, proto); nla_nest_end(msg, tuple); return 0; nla_put_failure: return -NLE_MSGSIZE; }
static int nfnl_exp_build_tuple(struct nl_msg *msg, const struct nfnl_exp *exp, int cta) { struct nlattr *tuple, *ip, *proto; struct nl_addr *addr; int family; family = nfnl_exp_get_family(exp); int type = exp_get_tuple_attr(cta); if (cta == CTA_EXPECT_NAT) tuple = nla_nest_start(msg, CTA_EXPECT_NAT_TUPLE); else tuple = nla_nest_start(msg, cta); if (!tuple) goto nla_put_failure; ip = nla_nest_start(msg, CTA_TUPLE_IP); if (!ip) goto nla_put_failure; addr = nfnl_exp_get_src(exp, type); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_SRC : CTA_IP_V6_SRC, addr); addr = nfnl_exp_get_dst(exp, type); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_DST : CTA_IP_V6_DST, addr); nla_nest_end(msg, ip); proto = nla_nest_start(msg, CTA_TUPLE_PROTO); if (!proto) goto nla_put_failure; if (nfnl_exp_test_l4protonum(exp, type)) NLA_PUT_U8(msg, CTA_PROTO_NUM, nfnl_exp_get_l4protonum(exp, type)); if (nfnl_exp_test_ports(exp, type)) { NLA_PUT_U16(msg, CTA_PROTO_SRC_PORT, htons(nfnl_exp_get_src_port(exp, type))); NLA_PUT_U16(msg, CTA_PROTO_DST_PORT, htons(nfnl_exp_get_dst_port(exp, type))); } if (nfnl_exp_test_icmp(exp, type)) { NLA_PUT_U16(msg, CTA_PROTO_ICMP_ID, htons(nfnl_exp_get_icmp_id(exp, type))); NLA_PUT_U8(msg, CTA_PROTO_ICMP_TYPE, nfnl_exp_get_icmp_type(exp, type)); NLA_PUT_U8(msg, CTA_PROTO_ICMP_CODE, nfnl_exp_get_icmp_code(exp, type)); } nla_nest_end(msg, proto); nla_nest_end(msg, tuple); return 0; nla_put_failure: return -NLE_MSGSIZE; }
static int handle_bitrates(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv) { struct nlattr *nl_rates, *nl_band; int i; bool have_legacy_24 = false, have_legacy_5 = false; uint8_t legacy_24[32], legacy_5[32]; int n_legacy_24 = 0, n_legacy_5 = 0; uint8_t *legacy = NULL; int *n_legacy = NULL; bool have_mcs_24 = false, have_mcs_5 = false; #ifdef NL80211_TXRATE_MCS uint8_t mcs_24[77], mcs_5[77]; int n_mcs_24 = 0, n_mcs_5 = 0; uint8_t *mcs = NULL; int *n_mcs = NULL; #endif enum { S_NONE, S_LEGACY, S_MCS, } parser_state = S_NONE; for (i = 0; i < argc; i++) { char *end; double tmpd; #ifdef NL80211_TXRATE_MCS long tmpl; #endif if (strcmp(argv[i], "legacy-2.4") == 0) { if (have_legacy_24) return 1; parser_state = S_LEGACY; legacy = legacy_24; n_legacy = &n_legacy_24; have_legacy_24 = true; } else if (strcmp(argv[i], "legacy-5") == 0) { if (have_legacy_5) return 1; parser_state = S_LEGACY; legacy = legacy_5; n_legacy = &n_legacy_5; have_legacy_5 = true; } #ifdef NL80211_TXRATE_MCS else if (strcmp(argv[i], "mcs-2.4") == 0) { if (have_mcs_24) return 1; parser_state = S_MCS; mcs = mcs_24; n_mcs = &n_mcs_24; have_mcs_24 = true; } else if (strcmp(argv[i], "mcs-5") == 0) { if (have_mcs_5) return 1; parser_state = S_MCS; mcs = mcs_5; n_mcs = &n_mcs_5; have_mcs_5 = true; } #endif else switch (parser_state) { case S_LEGACY: tmpd = strtod(argv[i], &end); if (*end != '\0') return 1; if (tmpd < 1 || tmpd > 255 * 2) return 1; legacy[(*n_legacy)++] = tmpd * 2; break; case S_MCS: #ifdef NL80211_TXRATE_MCS tmpl = strtol(argv[i], &end, 0); if (*end != '\0') return 1; if (tmpl < 0 || tmpl > 255) return 1; mcs[(*n_mcs)++] = tmpl; break; #endif default: return 1; } } nl_rates = nla_nest_start(msg, NL80211_ATTR_TX_RATES); if (!nl_rates) goto nla_put_failure; if (have_legacy_24 || have_mcs_24) { nl_band = nla_nest_start(msg, NL80211_BAND_2GHZ); if (!nl_band) goto nla_put_failure; if (have_legacy_24) nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_24, 
legacy_24); #ifdef NL80211_TXRATE_MCS if (have_mcs_24) nla_put(msg, NL80211_TXRATE_MCS, n_mcs_24, mcs_24); #endif nla_nest_end(msg, nl_band); } if (have_legacy_5 || have_mcs_5) { nl_band = nla_nest_start(msg, NL80211_BAND_5GHZ); if (!nl_band) goto nla_put_failure; if (have_legacy_5) nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_5, legacy_5); #ifdef NL80211_TXRATE_MCS if (have_mcs_5) nla_put(msg, NL80211_TXRATE_MCS, n_mcs_5, mcs_5); #endif nla_nest_end(msg, nl_band); } nla_nest_end(msg, nl_rates); return 0; nla_put_failure: return -ENOBUFS; }
/*
 * CHOKe qdisc dump path, classful-API stubs and registration boilerplate.
 *
 * choke_dump() emits the RED-style parameters (TCA_CHOKE_PARMS) and the
 * max_P scaling value inside TCA_OPTIONS, cancelling the nest and
 * returning -EMSGSIZE on failure.  choke_dump_stats() copies the
 * drop/mark counters out as tc_choke_xstats.  The choke_leaf/get/put/
 * bind helpers are stubs satisfying the classful qdisc interface (CHOKe
 * has no real classes); choke_find_tcf() exposes the filter list, and
 * the rest is the ops tables plus module init/exit.  Code is kept
 * byte-identical; comments only.
 */
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); choke_free(q->tab); } static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long choke_get(struct Qdisc *sch, u32 classid) { return 0; } static void choke_put(struct Qdisc *q, unsigned long cl) { } static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl) { struct choke_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static int choke_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg) { if (!arg->stop) { if (arg->fn(sch, 1, arg) < 0) { arg->stop = 1; return; } arg->count++; 
} } static const struct Qdisc_class_ops choke_class_ops = { .leaf = choke_leaf, .get = choke_get, .put = choke_put, .tcf_chain = choke_find_tcf, .bind_tcf = choke_bind, .unbind_tcf = choke_put, .dump = choke_dump_class, .walk = choke_walk, }; static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .drop = choke_drop, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL");
/*
 * libnl route cache support: RTM_*ROUTE message parsing, request
 * building, cache registration and module init/exit.
 *
 * route_msg_parser() decodes a kernel route message into a rtnl_route
 * object: header fields, dst/src (with prefix lengths), gateway,
 * priority, preferred source, nested RTA_METRICS, RTA_MULTIPATH nexthop
 * walking, realms and cacheinfo, then hands the object to the parser
 * callback.  build_route_msg() does the reverse for add/del requests,
 * defaulting the scope (HOST for local routes, otherwise LINK) when it
 * is RT_SCOPE_NOWHERE.
 *
 * NOTE(review): rtnl_route_alloc_cache() releases a failed cache with
 * plain free() rather than nl_cache_free() -- verify this cannot leak
 * cache internals.  NOTE(review): in route_msg_parser() the nested
 * ntb[RTA_GATEWAY] result of nla_get_addr() is stored without a
 * corresponding nl_addr_put() on error paths -- confirm ownership.
 * Code is kept byte-identical; comments only.
 */
static void copy_cacheinfo_into_route(struct rta_cacheinfo *ci, struct rtnl_route *route) { struct rtnl_rtcacheinfo nci = { .rtci_clntref = ci->rta_clntref, .rtci_last_use = ci->rta_lastuse, .rtci_expires = ci->rta_expires, .rtci_error = ci->rta_error, .rtci_used = ci->rta_used, .rtci_id = ci->rta_id, .rtci_ts = ci->rta_ts, .rtci_tsage = ci->rta_tsage, }; rtnl_route_set_cacheinfo(route, &nci); } static int route_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who, struct nlmsghdr *nlh, struct nl_parser_param *pp) { struct rtmsg *rtm; struct rtnl_route *route; struct nlattr *tb[RTA_MAX + 1]; struct nl_addr *src = NULL, *dst = NULL, *addr; int err; route = rtnl_route_alloc(); if (!route) { err = nl_errno(ENOMEM); goto errout; } route->ce_msgtype = nlh->nlmsg_type; err = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX, route_policy); if (err < 0) goto errout; rtm = nlmsg_data(nlh); rtnl_route_set_family(route, rtm->rtm_family); rtnl_route_set_tos(route, rtm->rtm_tos); rtnl_route_set_table(route, rtm->rtm_table); rtnl_route_set_type(route, rtm->rtm_type); rtnl_route_set_scope(route, rtm->rtm_scope); rtnl_route_set_protocol(route, rtm->rtm_protocol); rtnl_route_set_flags(route, rtm->rtm_flags); if (tb[RTA_DST]) { dst = nla_get_addr(tb[RTA_DST], rtm->rtm_family); if (dst == NULL) goto errout_errno; } else { dst = nl_addr_alloc(0); nl_addr_set_family(dst, rtm->rtm_family); } nl_addr_set_prefixlen(dst, rtm->rtm_dst_len); err = rtnl_route_set_dst(route, dst); if (err < 0) goto errout; nl_addr_put(dst); if (tb[RTA_SRC]) { src = nla_get_addr(tb[RTA_SRC], rtm->rtm_family); if (src == NULL) goto errout_errno; } else if (rtm->rtm_src_len) src = nl_addr_alloc(0); if (src) { nl_addr_set_prefixlen(src, rtm->rtm_src_len); rtnl_route_set_src(route, src); nl_addr_put(src); } if (tb[RTA_IIF]) rtnl_route_set_iif(route, nla_get_string(tb[RTA_IIF])); if (tb[RTA_OIF]) rtnl_route_set_oif(route, nla_get_u32(tb[RTA_OIF])); if (tb[RTA_GATEWAY]) { addr = 
nla_get_addr(tb[RTA_GATEWAY], route->rt_family); if (addr == NULL) goto errout_errno; rtnl_route_set_gateway(route, addr); nl_addr_put(addr); } if (tb[RTA_PRIORITY]) rtnl_route_set_prio(route, nla_get_u32(tb[RTA_PRIORITY])); if (tb[RTA_PREFSRC]) { addr = nla_get_addr(tb[RTA_PREFSRC], route->rt_family); if (addr == NULL) goto errout_errno; rtnl_route_set_pref_src(route, addr); nl_addr_put(addr); } if (tb[RTA_METRICS]) { struct nlattr *mtb[RTAX_MAX + 1]; int i; err = nla_parse_nested(mtb, RTAX_MAX, tb[RTA_METRICS], NULL); if (err < 0) goto errout; for (i = 1; i <= RTAX_MAX; i++) { if (mtb[i] && nla_len(mtb[i]) >= sizeof(uint32_t)) { uint32_t m = nla_get_u32(mtb[i]); if (rtnl_route_set_metric(route, i, m) < 0) goto errout_errno; } } } if (tb[RTA_MULTIPATH]) { struct rtnl_nexthop *nh; struct rtnexthop *rtnh = nla_data(tb[RTA_MULTIPATH]); size_t tlen = nla_len(tb[RTA_MULTIPATH]); while (tlen >= sizeof(*rtnh) && tlen >= rtnh->rtnh_len) { nh = rtnl_route_nh_alloc(); if (!nh) goto errout; rtnl_route_nh_set_weight(nh, rtnh->rtnh_hops); rtnl_route_nh_set_ifindex(nh, rtnh->rtnh_ifindex); rtnl_route_nh_set_flags(nh, rtnh->rtnh_flags); if (rtnh->rtnh_len > sizeof(*rtnh)) { struct nlattr *ntb[RTA_MAX + 1]; nla_parse(ntb, RTA_MAX, (struct nlattr *) RTNH_DATA(rtnh), rtnh->rtnh_len - sizeof(*rtnh), route_policy); if (ntb[RTA_GATEWAY]) { nh->rtnh_gateway = nla_get_addr( ntb[RTA_GATEWAY], route->rt_family); nh->rtnh_mask = NEXTHOP_HAS_GATEWAY; } } rtnl_route_add_nexthop(route, nh); tlen -= RTNH_ALIGN(rtnh->rtnh_len); rtnh = RTNH_NEXT(rtnh); } } if (tb[RTA_FLOW]) rtnl_route_set_realms(route, nla_get_u32(tb[RTA_FLOW])); if (tb[RTA_CACHEINFO]) copy_cacheinfo_into_route(nla_data(tb[RTA_CACHEINFO]), route); if (tb[RTA_MP_ALGO]) rtnl_route_set_mp_algo(route, nla_get_u32(tb[RTA_MP_ALGO])); err = pp->pp_cb((struct nl_object *) route, pp); if (err < 0) goto errout; err = P_ACCEPT; errout: rtnl_route_put(route); return err; errout_errno: err = nl_get_errno(); goto errout; } static int 
route_request_update(struct nl_cache *c, struct nl_handle *h) { return nl_rtgen_request(h, RTM_GETROUTE, AF_UNSPEC, NLM_F_DUMP); } /** * @name Cache Management * @{ */ /** * Build a route cache holding all routes currently configured in the kernel * @arg handle netlink handle * * Allocates a new cache, initializes it properly and updates it to * contain all routes currently configured in the kernel. * * @note The caller is responsible for destroying and freeing the * cache after using it. * @return The cache or NULL if an error has occured. */ struct nl_cache *rtnl_route_alloc_cache(struct nl_handle *handle) { struct nl_cache *cache; cache = nl_cache_alloc(&rtnl_route_ops); if (!cache) return NULL; if (handle && nl_cache_refill(handle, cache) < 0) { free(cache); return NULL; } return cache; } /** @} */ /** * @name Route Addition * @{ */ static struct nl_msg *build_route_msg(struct rtnl_route *tmpl, int cmd, int flags) { struct nl_msg *msg; struct nl_addr *addr; int scope, i, oif, nmetrics = 0; struct nlattr *metrics; struct rtmsg rtmsg = { .rtm_family = rtnl_route_get_family(tmpl), .rtm_dst_len = rtnl_route_get_dst_len(tmpl), .rtm_src_len = rtnl_route_get_src_len(tmpl), .rtm_tos = rtnl_route_get_tos(tmpl), .rtm_table = rtnl_route_get_table(tmpl), .rtm_type = rtnl_route_get_type(tmpl), .rtm_protocol = rtnl_route_get_protocol(tmpl), .rtm_flags = rtnl_route_get_flags(tmpl), }; if (rtmsg.rtm_family == AF_UNSPEC) { nl_error(EINVAL, "Cannot build route message, address " \ "family is unknown."); return NULL; } scope = rtnl_route_get_scope(tmpl); if (scope == RT_SCOPE_NOWHERE) { if (rtmsg.rtm_type == RTN_LOCAL) scope = RT_SCOPE_HOST; else { /* XXX Change to UNIVERSE if gw || nexthops */ scope = RT_SCOPE_LINK; } } rtmsg.rtm_scope = scope; msg = nlmsg_alloc_simple(cmd, flags); if (msg == NULL) return NULL; if (nlmsg_append(msg, &rtmsg, sizeof(rtmsg), NLMSG_ALIGNTO) < 0) goto nla_put_failure; addr = rtnl_route_get_dst(tmpl); if (addr) NLA_PUT_ADDR(msg, RTA_DST, addr); addr = 
rtnl_route_get_src(tmpl); if (addr) NLA_PUT_ADDR(msg, RTA_SRC, addr); addr = rtnl_route_get_gateway(tmpl); if (addr) NLA_PUT_ADDR(msg, RTA_GATEWAY, addr); addr = rtnl_route_get_pref_src(tmpl); if (addr) NLA_PUT_ADDR(msg, RTA_PREFSRC, addr); NLA_PUT_U32(msg, RTA_PRIORITY, rtnl_route_get_prio(tmpl)); oif = rtnl_route_get_oif(tmpl); if (oif != RTNL_LINK_NOT_FOUND) NLA_PUT_U32(msg, RTA_OIF, oif); for (i = 1; i <= RTAX_MAX; i++) if (rtnl_route_get_metric(tmpl, i) != UINT_MAX) nmetrics++; if (nmetrics > 0) { unsigned int val; metrics = nla_nest_start(msg, RTA_METRICS); if (metrics == NULL) goto nla_put_failure; for (i = 1; i <= RTAX_MAX; i++) { val = rtnl_route_get_metric(tmpl, i); if (val != UINT_MAX) NLA_PUT_U32(msg, i, val); } nla_nest_end(msg, metrics); } #if 0 RTA_IIF, RTA_MULTIPATH, RTA_PROTOINFO, RTA_FLOW, RTA_CACHEINFO, RTA_SESSION, RTA_MP_ALGO, #endif return msg; nla_put_failure: nlmsg_free(msg); return NULL; } struct nl_msg *rtnl_route_build_add_request(struct rtnl_route *tmpl, int flags) { return build_route_msg(tmpl, RTM_NEWROUTE, NLM_F_CREATE | flags); } int rtnl_route_add(struct nl_handle *handle, struct rtnl_route *route, int flags) { struct nl_msg *msg; int err; msg = rtnl_route_build_add_request(route, flags); if (!msg) return nl_get_errno(); err = nl_send_auto_complete(handle, msg); nlmsg_free(msg); if (err < 0) return err; return nl_wait_for_ack(handle); } struct nl_msg *rtnl_route_build_del_request(struct rtnl_route *tmpl, int flags) { return build_route_msg(tmpl, RTM_DELROUTE, flags); } int rtnl_route_del(struct nl_handle *handle, struct rtnl_route *route, int flags) { struct nl_msg *msg; int err; msg = rtnl_route_build_del_request(route, flags); if (!msg) return nl_get_errno(); err = nl_send_auto_complete(handle, msg); nlmsg_free(msg); if (err < 0) return err; return nl_wait_for_ack(handle); } /** @} */ static struct nl_af_group route_groups[] = { { AF_INET, RTNLGRP_IPV4_ROUTE }, { AF_INET6, RTNLGRP_IPV6_ROUTE }, { AF_DECnet, RTNLGRP_DECnet_ROUTE }, 
{ END_OF_GROUP_LIST }, }; static struct nl_cache_ops rtnl_route_ops = { .co_name = "route/route", .co_hdrsize = sizeof(struct rtmsg), .co_msgtypes = { { RTM_NEWROUTE, NL_ACT_NEW, "new" }, { RTM_DELROUTE, NL_ACT_DEL, "del" }, { RTM_GETROUTE, NL_ACT_GET, "get" }, END_OF_MSGTYPES_LIST, }, .co_protocol = NETLINK_ROUTE, .co_groups = route_groups, .co_request_update = route_request_update, .co_msg_parser = route_msg_parser, .co_obj_ops = &route_obj_ops, }; static void __init route_init(void) { nl_cache_mngt_register(&rtnl_route_ops); } static void __exit route_exit(void) { nl_cache_mngt_unregister(&rtnl_route_ops); }
/*
 * tc_dump_action - netlink dump callback for RTM_GETACTION.
 * @skb: reply buffer being filled
 * @cb:  dump state; cb->nlh holds the original request
 *
 * Looks up the action ops for the kind named in the request, then
 * delegates the per-action dump to the ops' ->walk() callback inside a
 * TCA_ACT_TAB nest.  Returns the number of bytes in @skb; on error the
 * partially written message is trimmed away and the previous length is
 * returned (0 tells the netlink core there is nothing to dump).
 */
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point for nlmsg_trim() */
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	/* presumably takes a module reference on success; every exit path
	 * below drops it with module_put() -- TODO confirm in tc_lookup_action */
	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		WARN(1, "tc_dump_action: %s !capable of dumping table\n", a_o->kind);
		goto out_module_put;
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);	/* nothing dumped: drop the empty nest */

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);	/* undo any partially written message */
	return skb->len;
}
/* Setter */
/*
 * set_interface_meshparam - fill an NL80211_ATTR_MESH_PARAMS nest from argv.
 *
 * Accepts "name=value" tokens, or "name value" pairs for backward
 * compatibility.  Each recognised parameter is validated by its
 * descriptor's parse_fn() and appended to @msg via its nla_put_fn().
 *
 * Returns 0 on success, 1 when no arguments were given, 2 on a bad
 * parameter name or value, or a negative netlink error from nla_put_fn().
 */
static int set_interface_meshparam(struct nl80211_state *state,
				   struct nl_cb *cb,
				   struct nl_msg *msg,
				   int argc, char **argv,
				   enum id_input id)
{
	const struct mesh_param_descr *mdescr;
	struct nlattr *container;
	uint32_t ret;
	int err;

	container = nla_nest_start(msg, NL80211_ATTR_MESH_PARAMS);
	if (!container)
		return -ENOBUFS;

	if (!argc)
		return 1;	/* nothing to set: usage error */

	while (argc) {
		const char *name;
		char *value;
		_any any;

		memset(&any, 0, sizeof(_any));

		name = argv[0];
		value = strchr(name, '=');
		if (value) {
			*value = '\0';	/* split "name=value" in place */
			value++;
			argc--;
			argv++;
		} else {
			/* backward compat -- accept w/o '=' */
			if (argc < 2) {
				printf("Must specify a value for %s.\n", name);
				return 2;
			}
			value = argv[1];
			argc -= 2;
			argv += 2;
		}

		mdescr = find_mesh_param(name);
		if (!mdescr)
			return 2;	/* unknown parameter name */

		/* Parse the new value */
		ret = mdescr->parse_fn(value, &any);
		if (ret != 0) {
			/* on failure ret appears to carry the valid upper
			 * bound (it is printed as such below) -- TODO confirm
			 * against the parse_fn implementations */
			if (mdescr->mesh_param_num
			    == NL80211_MESHCONF_POWER_MODE)
				printf("%s must be set to active, light or "
				       "deep.\n", mdescr->name);
			else
				printf("%s must be set to a number "
				       "between 0 and %u\n",
				       mdescr->name, ret);
			return 2;
		}

		err = mdescr->nla_put_fn(msg, mdescr->mesh_param_num, &any);
		if (err)
			return err;
	}
	nla_nest_end(msg, container);
	/* err is 0 here: the loop ran at least once (argc was non-zero)
	 * and the last nla_put_fn() succeeded */
	return err;
}
/*
 * handle_bitrates - implement "iw ... set bitrates ...".
 *
 * Parses per-band rate restrictions from argv with a small state machine:
 * a band/type keyword ("legacy-2.4", "ht-mcs-5", "vht-mcs-2.4", "sgi-5",
 * ...) selects where subsequent bare numbers are stored, then everything
 * collected is emitted as an NL80211_ATTR_TX_RATES nest with one inner
 * nest per band.
 *
 * Returns 0 on success, 1 on usage errors, -EINVAL for bad VHT specs,
 * or -ENOBUFS when the message buffer is exhausted.
 */
static int handle_bitrates(struct nl80211_state *state,
			   struct nl_msg *msg,
			   int argc, char **argv,
			   enum id_input id)
{
	struct nlattr *nl_rates, *nl_band;
	int i;
	bool have_legacy_24 = false, have_legacy_5 = false;
	uint8_t legacy_24[32], legacy_5[32];
	int n_legacy_24 = 0, n_legacy_5 = 0;
	uint8_t *legacy = NULL;		/* currently selected legacy array */
	int *n_legacy = NULL;
	bool have_ht_mcs_24 = false, have_ht_mcs_5 = false;
	bool have_vht_mcs_24 = false, have_vht_mcs_5 = false;
	uint8_t ht_mcs_24[77], ht_mcs_5[77];
	int n_ht_mcs_24 = 0, n_ht_mcs_5 = 0;
	struct nl80211_txrate_vht txrate_vht_24 = {};
	struct nl80211_txrate_vht txrate_vht_5 = {};
	uint8_t *mcs = NULL;		/* currently selected HT MCS array */
	int *n_mcs = NULL;
	char *vht_argv_5[VHT_ARGC_MAX] = {};
	char *vht_argv_24[VHT_ARGC_MAX] = {};
	char **vht_argv = NULL;		/* currently selected VHT arg list */
	int vht_argc_5 = 0;
	int vht_argc_24 = 0;
	int *vht_argc = NULL;
	int sgi_24 = 0, sgi_5 = 0, lgi_24 = 0, lgi_5 = 0;
	enum {
		S_NONE,
		S_LEGACY,
		S_HT,
		S_VHT,
		S_GI,
	} parser_state = S_NONE;

	for (i = 0; i < argc; i++) {
		char *end;
		double tmpd;
		long tmpl;

		/* each keyword may appear at most once and switches the
		 * parser to collect the numbers that follow it */
		if (strcmp(argv[i], "legacy-2.4") == 0) {
			if (have_legacy_24)
				return 1;
			parser_state = S_LEGACY;
			legacy = legacy_24;
			n_legacy = &n_legacy_24;
			have_legacy_24 = true;
		} else if (strcmp(argv[i], "legacy-5") == 0) {
			if (have_legacy_5)
				return 1;
			parser_state = S_LEGACY;
			legacy = legacy_5;
			n_legacy = &n_legacy_5;
			have_legacy_5 = true;
		} else if (strcmp(argv[i], "ht-mcs-2.4") == 0) {
			if (have_ht_mcs_24)
				return 1;
			parser_state = S_HT;
			mcs = ht_mcs_24;
			n_mcs = &n_ht_mcs_24;
			have_ht_mcs_24 = true;
		} else if (strcmp(argv[i], "ht-mcs-5") == 0) {
			if (have_ht_mcs_5)
				return 1;
			parser_state = S_HT;
			mcs = ht_mcs_5;
			n_mcs = &n_ht_mcs_5;
			have_ht_mcs_5 = true;
		} else if (strcmp(argv[i], "vht-mcs-2.4") == 0) {
			if (have_vht_mcs_24)
				return 1;
			parser_state = S_VHT;
			vht_argv = vht_argv_24;
			vht_argc = &vht_argc_24;
			have_vht_mcs_24 = true;
		} else if (strcmp(argv[i], "vht-mcs-5") == 0) {
			if (have_vht_mcs_5)
				return 1;
			parser_state = S_VHT;
			vht_argv = vht_argv_5;
			vht_argc = &vht_argc_5;
			have_vht_mcs_5 = true;
		} else if (strcmp(argv[i], "sgi-2.4") == 0) {
			sgi_24 = 1;
			parser_state = S_GI;
		} else if (strcmp(argv[i], "sgi-5") == 0) {
			sgi_5 = 1;
			parser_state = S_GI;
		} else if (strcmp(argv[i], "lgi-2.4") == 0) {
			lgi_24 = 1;
			parser_state = S_GI;
		} else if (strcmp(argv[i], "lgi-5") == 0) {
			lgi_5 = 1;
			parser_state = S_GI;
		} else switch (parser_state) {
		case S_LEGACY:
			tmpd = strtod(argv[i], &end);
			if (*end != '\0')
				return 1;
			if (tmpd < 1 || tmpd > 255 * 2)
				return 1;
			/* stored as tmpd * 2 -- presumably Mbps converted to
			 * half-Mbps units; TODO confirm against nl80211 */
			legacy[(*n_legacy)++] = tmpd * 2;
			break;
		case S_HT:
			tmpl = strtol(argv[i], &end, 0);
			if (*end != '\0')
				return 1;
			if (tmpl < 0 || tmpl > 255)
				return 1;
			mcs[(*n_mcs)++] = tmpl;
			break;
		case S_VHT:
			if (*vht_argc >= VHT_ARGC_MAX)
				return 1;
			/* VHT specs are parsed later by setup_vht() */
			vht_argv[(*vht_argc)++] = argv[i];
			break;
		case S_GI:
			break;	/* sgi-*/lgi-* keywords take no values */
		default:
			return 1;	/* number before any keyword */
		}
	}

	if (have_vht_mcs_24)
		if (!setup_vht(&txrate_vht_24, vht_argc_24, vht_argv_24))
			return -EINVAL;

	if (have_vht_mcs_5)
		if (!setup_vht(&txrate_vht_5, vht_argc_5, vht_argv_5))
			return -EINVAL;

	/* short and long GI are mutually exclusive per band */
	if (sgi_5 && lgi_5)
		return 1;
	if (sgi_24 && lgi_24)
		return 1;

	nl_rates = nla_nest_start(msg, NL80211_ATTR_TX_RATES);
	if (!nl_rates)
		goto nla_put_failure;

	if (have_legacy_24 || have_ht_mcs_24 || have_vht_mcs_24 || sgi_24 || lgi_24) {
		nl_band = nla_nest_start(msg, NL80211_BAND_2GHZ);
		if (!nl_band)
			goto nla_put_failure;
		/* NOTE(review): the nla_put*() return values below are
		 * ignored; a full buffer would go unnoticed here */
		if (have_legacy_24)
			nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_24, legacy_24);
		if (have_ht_mcs_24)
			nla_put(msg, NL80211_TXRATE_HT, n_ht_mcs_24, ht_mcs_24);
		if (have_vht_mcs_24)
			nla_put(msg, NL80211_TXRATE_VHT, sizeof(txrate_vht_24), &txrate_vht_24);
		if (sgi_24)
			nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_SGI);
		if (lgi_24)
			nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_LGI);
		nla_nest_end(msg, nl_band);
	}

	if (have_legacy_5 || have_ht_mcs_5 || have_vht_mcs_5 || sgi_5 || lgi_5) {
		nl_band = nla_nest_start(msg, NL80211_BAND_5GHZ);
		if (!nl_band)
			goto nla_put_failure;
		if (have_legacy_5)
			nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_5, legacy_5);
		if (have_ht_mcs_5)
			nla_put(msg, NL80211_TXRATE_HT, n_ht_mcs_5, ht_mcs_5);
		if (have_vht_mcs_5)
			nla_put(msg, NL80211_TXRATE_VHT, sizeof(txrate_vht_5), &txrate_vht_5);
		if (sgi_5)
			nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_SGI);
		if (lgi_5)
			nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_LGI);
		nla_nest_end(msg, nl_band);
	}

	nla_nest_end(msg, nl_rates);

	return 0;
 nla_put_failure:
	return -ENOBUFS;
}
static int join_mesh(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv, enum id_input id) { struct nlattr *container; float rate; int bintval, dtim_period; char *end; if (argc < 1) return 1; NLA_PUT(msg, NL80211_ATTR_MESH_ID, strlen(argv[0]), argv[0]); argc--; argv++; if (argc > 1 && strcmp(argv[0], "mcast-rate") == 0) { argv++; argc--; rate = strtod(argv[0], &end); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_MCAST_RATE, (int)(rate * 10)); argv++; argc--; } if (argc > 1 && strcmp(argv[0], "beacon-interval") == 0) { argc--; argv++; bintval = strtoul(argv[0], &end, 10); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_BEACON_INTERVAL, bintval); argv++; argc--; } if (argc > 1 && strcmp(argv[0], "dtim-period") == 0) { argc--; argv++; dtim_period = strtoul(argv[0], &end, 10); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_DTIM_PERIOD, dtim_period); argv++; argc--; } container = nla_nest_start(msg, NL80211_ATTR_MESH_SETUP); if (!container) return -ENOBUFS; if (argc > 1 && strcmp(argv[0], "vendor_sync") == 0) { argv++; argc--; if (strcmp(argv[0], "on") == 0) NLA_PUT_U8(msg, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, 1); else NLA_PUT_U8(msg, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, 0); argv++; argc--; } /* parse and put other NL80211_ATTR_MESH_SETUP elements here */ nla_nest_end(msg, container); if (!argc) return 0; return set_interface_meshparam(state, cb, msg, argc, argv, id); nla_put_failure: return -ENOBUFS; }
static int handle_wowlan_enable(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv, enum id_input id) { struct nlattr *wowlan, *pattern; struct nl_msg *patterns = NULL; enum { PS_REG, PS_PAT, } parse_state = PS_REG; int err = -ENOBUFS; unsigned char *pat, *mask; size_t patlen; int patnum = 0; wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!wowlan) return -ENOBUFS; while (argc) { switch (parse_state) { case PS_REG: if (strcmp(argv[0], "any") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); else if (strcmp(argv[0], "disconnect") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); else if (strcmp(argv[0], "magic-packet") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); else if (strcmp(argv[0], "gtk-rekey-failure") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); else if (strcmp(argv[0], "eap-identity-request") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); else if (strcmp(argv[0], "4way-handshake") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); else if (strcmp(argv[0], "rfkill-release") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); else if (strcmp(argv[0], "patterns") == 0) { parse_state = PS_PAT; patterns = nlmsg_alloc(); if (!patterns) { err = -ENOMEM; goto nla_put_failure; } } else { err = 1; goto nla_put_failure; } break; case PS_PAT: if (parse_hex_mask(argv[0], &pat, &patlen, &mask)) { err = 1; goto nla_put_failure; } pattern = nla_nest_start(patterns, ++patnum); NLA_PUT(patterns, NL80211_WOWLAN_PKTPAT_MASK, DIV_ROUND_UP(patlen, 8), mask); NLA_PUT(patterns, NL80211_WOWLAN_PKTPAT_PATTERN, patlen, pat); nla_nest_end(patterns, pattern); free(mask); free(pat); break; } argv++; argc--; } if (patterns) nla_put_nested(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, patterns); nla_nest_end(msg, wowlan); err = 0; nla_put_failure: nlmsg_free(patterns); return err; }
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) { struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *opts = NULL; int i; u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, .grio = gred_rio_mode(table), .flags = table->red_flags, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? q->parms.max_P : 0; } nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct tc_gred_qopt opt; memset(&opt, 0, sizeof(opt)); if (!q) { /* hack -- fix at some point with proper message This is how we indicate to tc that there is no VQ at this DP */ opt.DP = MAX_DPs + i; goto append_opt; } opt.limit = q->limit; opt.DP = q->DP; opt.backlog = q->backlog; opt.prio = q->prio; opt.qth_min = q->parms.qth_min >> q->parms.Wlog; opt.qth_max = q->parms.qth_max >> q->parms.Wlog; opt.Wlog = q->parms.Wlog; opt.Plog = q->parms.Plog; opt.Scell_log = q->parms.Scell_log; opt.other = q->stats.other; opt.early = q->stats.prob_drop; opt.forced = q->stats.forced_drop; opt.pdrop = q->stats.pdrop; opt.packets = q->packetsin; opt.bytesin = q->bytesin; if (gred_wred_mode(table)) gred_load_wred_set(table, q); opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) goto nla_put_failure; } nla_nest_end(skb, parms); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static void gred_destroy(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); int i; for (i = 0; i < table->DPs; i++) { if (table->tab[i]) gred_destroy_vq(table->tab[i]); } } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { .id = "gred", 
.priv_size = sizeof(struct gred_sched), .enqueue = gred_enqueue, .dequeue = gred_dequeue, .peek = qdisc_peek_head, .drop = gred_drop, .init = gred_init, .reset = gred_reset, .destroy = gred_destroy, .change = gred_change, .dump = gred_dump, .owner = THIS_MODULE, }; static int __init gred_module_init(void) { return register_qdisc(&gred_qdisc_ops); } static void __exit gred_module_exit(void) { unregister_qdisc(&gred_qdisc_ops); } module_init(gred_module_init) module_exit(gred_module_exit) MODULE_LICENSE("GPL");
/**
 * virNetDevMacVLanCreate:
 *
 * @ifname: The name the interface is supposed to have; optional parameter
 * @type: The type of device, i.e., "macvtap", "macvlan"
 * @macaddress: The MAC address of the device
 * @srcdev: The name of the 'link' device
 * @macvlan_mode: The macvlan mode to use
 * @retry: Pointer to integer that will be '1' upon return if an interface
 *         with the same name already exists and it is worth to try
 *         again with a different name
 *
 * Create a macvtap device with the given properties.
 *
 * Returns 0 on success, -1 on fatal error.
 */
int
virNetDevMacVLanCreate(const char *ifname,
                       const char *type,
                       const unsigned char *macaddress,
                       const char *srcdev,
                       uint32_t macvlan_mode,
                       int *retry)
{
    int rc = -1;
    struct nlmsghdr *resp;
    struct nlmsgerr *err;
    struct ifinfomsg ifinfo = { .ifi_family = AF_UNSPEC };
    int ifindex;
    unsigned char *recvbuf = NULL;
    unsigned int recvbuflen;
    struct nl_msg *nl_msg;
    struct nlattr *linkinfo, *info_data;

    /* the new device is linked to srcdev, so its ifindex must resolve */
    if (virNetDevGetIndex(srcdev, &ifindex) < 0)
        return -1;

    *retry = 0;

    /* NLM_F_EXCL makes the kernel answer -EEXIST rather than reusing an
     * interface that already has the requested name */
    nl_msg = nlmsg_alloc_simple(RTM_NEWLINK,
                                NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL);
    if (!nl_msg) {
        virReportOOMError();
        return -1;
    }

    if (nlmsg_append(nl_msg,  &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO) < 0)
        goto buffer_too_small;

    if (nla_put_u32(nl_msg, IFLA_LINK, ifindex) < 0)
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_ADDRESS, VIR_MAC_BUFLEN, macaddress) < 0)
        goto buffer_too_small;

    if (ifname &&
        nla_put(nl_msg, IFLA_IFNAME, strlen(ifname)+1, ifname) < 0)
        goto buffer_too_small;

    /* IFLA_LINKINFO nest: device kind plus kind-specific data */
    if (!(linkinfo = nla_nest_start(nl_msg, IFLA_LINKINFO)))
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_INFO_KIND, strlen(type), type) < 0)
        goto buffer_too_small;

    if (macvlan_mode > 0) {
        if (!(info_data = nla_nest_start(nl_msg, IFLA_INFO_DATA)))
            goto buffer_too_small;

        if (nla_put(nl_msg, IFLA_MACVLAN_MODE, sizeof(macvlan_mode),
                    &macvlan_mode) < 0)
            goto buffer_too_small;

        nla_nest_end(nl_msg, info_data);
    }

    nla_nest_end(nl_msg, linkinfo);

    if (virNetlinkCommand(nl_msg, &recvbuf, &recvbuflen, 0) < 0) {
        goto cleanup;
    }

    if (recvbuflen < NLMSG_LENGTH(0) || recvbuf == NULL)
        goto malformed_resp;

    resp = (struct nlmsghdr *)recvbuf;

    switch (resp->nlmsg_type) {
    case NLMSG_ERROR:
        err = (struct nlmsgerr *)NLMSG_DATA(resp);
        if (resp->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
            goto malformed_resp;

        switch (err->error) {
        case 0:
            /* an error code of 0 is the kernel's ACK: creation worked */
            break;

        case -EEXIST:
            /* name collision: caller may retry with a different name */
            *retry = 1;
            goto cleanup;

        default:
            virReportSystemError(-err->error,
                                 _("error creating %s type of interface"),
                                 type);
            goto cleanup;
        }
        break;

    case NLMSG_DONE:
        break;

    default:
        goto malformed_resp;
    }

    rc = 0;
 cleanup:
    nlmsg_free(nl_msg);
    VIR_FREE(recvbuf);
    return rc;

 malformed_resp:
    virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("malformed netlink response message"));
    goto cleanup;

 buffer_too_small:
    virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("allocated netlink buffer is too small"));
    goto cleanup;
}
int match_nl_set_port(struct nl_sock *nsd, uint32_t pid, unsigned int ifindex, int family, struct net_mat_port *port) { uint8_t cmd = NET_MAT_PORT_CMD_SET_PORTS; struct nlattr *tb[NET_MAT_MAX+1]; struct nlattr *nest, *nest1; struct nlmsghdr *nlh; struct match_msg *msg; sigset_t bs; int err = 0; msg = match_nl_alloc_msg(cmd, pid, NLM_F_REQUEST|NLM_F_ACK, 0, family); if (!msg) { MAT_LOG(ERR, "Error: Allocation failure\n"); return -ENOMSG; } if (nla_put_u32(msg->nlbuf, NET_MAT_IDENTIFIER_TYPE, NET_MAT_IDENTIFIER_IFINDEX) || nla_put_u32(msg->nlbuf, NET_MAT_IDENTIFIER, ifindex)) { MAT_LOG(ERR, "Error: Identifier put failed\n"); match_nl_free_msg(msg); return -EMSGSIZE; } nest = nla_nest_start(msg->nlbuf, NET_MAT_PORTS); if (!nest) { match_nl_free_msg(msg); return -EMSGSIZE; } nest1 = nla_nest_start(msg->nlbuf, NET_MAT_PORTS); match_put_port(msg->nlbuf, port); nla_nest_end(msg->nlbuf, nest1); nla_nest_end(msg->nlbuf, nest); nl_send_auto(nsd, msg->nlbuf); match_nl_free_msg(msg); sigemptyset(&bs); sigaddset(&bs, SIGINT); sigprocmask(SIG_UNBLOCK, &bs, NULL); msg = match_nl_recv_msg(nsd, &err); sigprocmask(SIG_BLOCK, &bs, NULL); if (!msg) return -EINVAL; nlh = msg->msg; err = genlmsg_parse(nlh, 0, tb, NET_MAT_MAX, match_get_tables_policy); if (err < 0) { MAT_LOG(ERR, "Warning: unable to parse set port msg\n"); match_nl_free_msg(msg); return err; } err = match_nl_table_cmd_to_type(stdout, true, 0, tb); if (err) { match_nl_free_msg(msg); return err; } if (tb[NET_MAT_PORTS]) { MAT_LOG(ERR, "Failed to set:\n"); match_get_ports(stdout, verbose, tb[NET_MAT_PORTS], NULL); match_nl_free_msg(msg); return -EINVAL; } match_nl_free_msg(msg); return 0; }