static int ieee802154_llsec_fill_key_id(struct sk_buff *msg,
					const struct ieee802154_llsec_key_id *desc)
{
	if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
		if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
				      desc->device_addr.pan_id))
			return -EMSGSIZE;

		if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
		    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
				      desc->device_addr.short_addr))
			return -EMSGSIZE;

		if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
		    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
				   desc->device_addr.extended_addr))
			return -EMSGSIZE;
	}

	if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
			le32_to_cpu(desc->short_source)))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
	    nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
			   desc->extended_source))
		return -EMSGSIZE;

	return 0;
}
int lxc_netdev_move_by_index(int ifindex, pid_t pid, const char *ifname)
{
	struct nl_handler nlh;
	struct nlmsg *nlmsg = NULL;
	struct ifinfomsg *ifi;
	int err;

	err = netlink_open(&nlh, NETLINK_ROUTE);
	if (err)
		return err;

	err = -ENOMEM;
	nlmsg = nlmsg_alloc(NLMSG_GOOD_SIZE);
	if (!nlmsg)
		goto out;

	nlmsg->nlmsghdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlmsg->nlmsghdr->nlmsg_type = RTM_NEWLINK;

	ifi = nlmsg_reserve(nlmsg, sizeof(struct ifinfomsg));
	if (!ifi)
		goto out;
	ifi->ifi_family = AF_UNSPEC;
	ifi->ifi_index = ifindex;

	if (nla_put_u32(nlmsg, IFLA_NET_NS_PID, pid))
		goto out;

	if (ifname != NULL) {
		if (nla_put_string(nlmsg, IFLA_IFNAME, ifname))
			goto out;
	}

	err = netlink_transaction(&nlh, nlmsg, nlmsg);
out:
	netlink_close(&nlh);
	nlmsg_free(nlmsg);
	return err;
}
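/*
 * Illustrative usage only (not from the LXC sources): moving a veth peer into
 * a child's network namespace and renaming it to "eth0" on the way in. The
 * peer name "vethXYZ" is a placeholder.
 */
#include <errno.h>
#include <net/if.h>

static int move_veth_to_child(pid_t child_pid)
{
	int ifindex = if_nametoindex("vethXYZ");

	if (!ifindex)
		return -errno;

	/* Passing IFLA_IFNAME makes the kernel rename the device during the move. */
	return lxc_netdev_move_by_index(ifindex, child_pid, "eth0");
}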
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
	if (!msg)
		return -ENOBUFS;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
		    dev->dev_addr) ||
	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
		goto nla_put_failure;

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
void decoder::send_request(size_t seq)
{
	struct nl_msg *msg;

	msg = nlmsg_alloc();
	if (!msg)
		return; /* allocation failed; nothing to send */

	genlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, m_io->genl_family(), 0, 0,
		    BATADV_HLP_C_FRAME, 1);

	nla_put_u32(msg, BATADV_HLP_A_IFINDEX, m_io->ifindex());
	nla_put_u8(msg, BATADV_HLP_A_TYPE, REQ_PACKET);
	nla_put(msg, BATADV_HLP_A_SRC, ETH_ALEN, _key.src);
	nla_put(msg, BATADV_HLP_A_DST, ETH_ALEN, _key.dst);
	nla_put_u16(msg, BATADV_HLP_A_BLOCK, _key.block);
	nla_put_u16(msg, BATADV_HLP_A_RANK, this->rank());
	nla_put_u16(msg, BATADV_HLP_A_SEQ, seq);

	m_io->send_msg(msg);
	nlmsg_free(msg);

	inc("request sent");

	VLOG(LOG_CTRL) << "Decoder " << m_coder << ": Sent request packet";
}
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
{
	struct sk_buff *skb;

	if (!buf || buf_len == 0)
		return;

	skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL);
	if (!skb) {
		ath6kl_warn("failed to allocate testmode rx skb!\n");
		return;
	}

	if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
	    nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
		goto nla_put_failure;

	cfg80211_testmode_event(skb, GFP_KERNEL);
	return;

nla_put_failure:
	kfree_skb(skb);
	ath6kl_warn("nla_put failed on testmode rx skb!\n");
}
static int bond_fill_slave_info(struct sk_buff *skb,
				const struct net_device *bond_dev,
				const struct net_device *slave_dev)
{
	struct slave *slave = bond_slave_get_rtnl(slave_dev);

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
			slave->link_failure_count))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
		    slave_dev->addr_len, slave->perm_hwaddr))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
		goto nla_put_failure;

	if (slave->bond->params.mode == BOND_MODE_8023AD) {
		const struct aggregator *agg;

		agg = SLAVE_AD_INFO(slave).port.aggregator;
		if (agg &&
		    nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
				agg->aggregator_identifier))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
int ac_kmod_authorize_station(struct capwap_sessionid_element *sessionid,
			      const uint8_t *macaddress, int ifindex,
			      uint8_t radioid, uint8_t wlanid, uint16_t vlan)
{
	int result;
	struct nl_msg *msg;

	ASSERT(sessionid != NULL);
	ASSERT(macaddress != NULL);
	ASSERT(ifindex >= 0);
	ASSERT(IS_VALID_RADIOID(radioid));
	ASSERT(vlan < VLAN_MAX);

	msg = nlmsg_alloc();
	if (!msg) {
		return -1;
	}

	genlmsg_put(msg, 0, 0, g_ac.kmodhandle.nlsmartcapwap_id, 0, 0,
		    NLSMARTCAPWAP_CMD_AUTH_STATION, 0);
	nla_put(msg, NLSMARTCAPWAP_ATTR_SESSION_ID,
		sizeof(struct capwap_sessionid_element), sessionid);
	nla_put(msg, NLSMARTCAPWAP_ATTR_MACADDRESS, MACADDRESS_EUI48_LENGTH,
		macaddress);
	nla_put_u32(msg, NLSMARTCAPWAP_ATTR_IFPHY_INDEX, (uint32_t)ifindex);
	nla_put_u8(msg, NLSMARTCAPWAP_ATTR_RADIOID, radioid);
	nla_put_u8(msg, NLSMARTCAPWAP_ATTR_WLANID, wlanid);
	if (vlan > 0) {
		/* The VLAN attribute must carry the VLAN id, not the ifindex */
		nla_put_u16(msg, NLSMARTCAPWAP_ATTR_VLAN, vlan);
	}

	result = ac_kmod_send_and_recv_msg(msg, NULL, NULL);
	if (result) {
		log_printf(LOG_ERR, "Unable to authorize station: %d", result);
	}

	nlmsg_free(msg);
	return result;
}
static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
{
	int err;
	void *hdr;
	struct nlattr *nest;
	struct sk_buff *args;
	static struct tipc_nl_compat_cmd_dump dump = {
		.dumpit = tipc_nl_publ_dump,
		.format = __tipc_nl_compat_publ_dump,
	};

	args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
			  TIPC_NL_PUBL_GET);
	if (!hdr) {
		/* genlmsg_put() can fail; don't build on a NULL header */
		kfree_skb(args);
		return -EMSGSIZE;
	}

	nest = nla_nest_start(args, TIPC_NLA_SOCK);
	if (!nest) {
		kfree_skb(args);
		return -EMSGSIZE;
	}

	if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) {
		kfree_skb(args);
		return -EMSGSIZE;
	}

	nla_nest_end(args, nest);
	genlmsg_end(args, hdr);

	err = __tipc_nl_compat_dumpit(&dump, msg, args);

	kfree_skb(args);
	return err;
}
/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}
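/*
 * Illustrative only: a minimal libnl-3 sketch of a userspace listener for the
 * ring-error notification built above. The genl family name ("HSR") and
 * multicast group name ("hsr-network") are assumptions about the HSR netlink
 * interface; check linux/hsr_netlink.h and the kernel sources before relying
 * on them.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/hsr_netlink.h>

static int hsr_ringerror_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[HSR_A_MAX + 1];

	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, HSR_A_MAX, NULL) < 0)
		return NL_SKIP;

	if (attrs[HSR_A_NODE_ADDR] && attrs[HSR_A_IFINDEX]) {
		unsigned char *mac = nla_data(attrs[HSR_A_NODE_ADDR]);

		printf("ring error: node %02x:%02x:%02x:%02x:%02x:%02x only heard on ifindex %u\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		       nla_get_u32(attrs[HSR_A_IFINDEX]));
	}
	return NL_OK;
}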
void nl80211_remove_iface(struct nl80211_data *ctx, int ifidx)
{
	struct nl_msg *msg = NULL;

	fprintf(stderr, "nl80211: Remove interface ifindex=%d\n", ifidx);

	msg = nlmsg_alloc();
	if (!msg)
		goto failed;
	if (!nl80211_cmd(ctx, msg, 0, NL80211_CMD_DEL_INTERFACE))
		goto failed;
	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifidx))
		goto failed;

	if (send_and_recv_msgs(ctx, msg, NULL, NULL) == 0)
		return;

failed:
	if (msg)
		nlmsg_free(msg);
	fprintf(stderr, "failed to remove interface (ifidx=%d)\n", ifidx);
}
int nl80211_unset_ap(struct nl80211_data *ctx)
{
	struct nl_msg *msg = NULL;
	int ret;

	if (!ctx)
		return -1;
	if (ctx->ifindex < 0)
		return -1;

	msg = nlmsg_alloc();
	if (!msg)
		goto fail;
	if (!nl80211_cmd(ctx, msg, 0, NL80211_CMD_DEL_BEACON))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, ctx->ifindex))
		goto fail;

	ret = send_and_recv_msgs(ctx, msg, NULL, NULL);
	if (ret) {
		fprintf(stderr, "nl80211: Beacon unset (DEL_BEACON) failed: %d (%s)\n",
			ret, strerror(-ret));
		goto fail;
	}

	return 0;

fail:
	if (msg)
		nlmsg_free(msg);
	return -1;
}
int ieee802154_nl_beacon_indic(struct net_device *dev, u16 panid,
			       u16 coord_addr)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
	if (!msg)
		return -ENOBUFS;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
		    dev->dev_addr) ||
	    nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
	    nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
		goto nla_put_failure;

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	frh->dst_len = rule4->dst_len;
	frh->src_len = rule4->src_len;
	frh->tos = rule4->tos;

	/* nla_put_be32() can fail when the skb runs out of tailroom; the
	 * return values were previously ignored, which left the
	 * nla_put_failure label unreachable and truncation undetected.
	 */
	if ((rule4->dst_len &&
	     nla_put_be32(skb, FRA_DST, rule4->dst)) ||
	    (rule4->src_len &&
	     nla_put_be32(skb, FRA_SRC, rule4->src)))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rule4->tclassid &&
	    nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
		goto nla_put_failure;
#endif
	return 0;

nla_put_failure:
	return -ENOBUFS;
}
static int put_char_array(struct sk_buff *skb, const char *const *chars,
			  int type, int nested_type)
{
	struct nlattr *nest_attr;
	int ret = 0;

	if (chars) {
		int i = 0;

		nest_attr = nla_nest_start(skb, type);
		if (!nest_attr) {
			ret = -EMSGSIZE;
			goto failure;
		}

		while (chars[i]) {
			ret = nla_put_string(skb, nested_type, chars[i]);
			if (ret != 0)
				goto failure;
			i++;
		}

		/* Count of elements */
		ret = nla_put_u32(skb, DIRECTOR_A_LENGTH, i);
		if (ret != 0)
			goto failure;

		nla_nest_end(skb, nest_attr);
	}

	return ret;

failure:
	minfo(ERR1, "Failed to put char array: %d", ret);
	return ret;
}
int lxc_netdev_move(char *ifname, pid_t pid)
{
	struct nl_handler nlh;
	struct nlmsg *nlmsg = NULL;
	struct link_req *link_req;
	int err, index;

	index = if_nametoindex(ifname);
	if (!index)	/* validate the lookup result, not the name pointer */
		return -EINVAL;

	err = netlink_open(&nlh, NETLINK_ROUTE);
	if (err)
		return err;

	err = -ENOMEM;
	nlmsg = nlmsg_alloc(NLMSG_GOOD_SIZE);
	if (!nlmsg)
		goto out;

	link_req = (struct link_req *)nlmsg;
	link_req->ifinfomsg.ifi_family = AF_UNSPEC;
	link_req->ifinfomsg.ifi_index = index;
	nlmsg->nlmsghdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	nlmsg->nlmsghdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlmsg->nlmsghdr.nlmsg_type = RTM_NEWLINK;

	if (nla_put_u32(nlmsg, IFLA_NET_NS_PID, pid))
		goto out;

	err = netlink_transaction(&nlh, nlmsg, nlmsg);
out:
	netlink_close(&nlh);
	nlmsg_free(nlmsg);
	return err;
}
int opal_btl_usnic_nl_ip_rt_lookup(struct usnic_rtnl_sk *unlsk,
				   const char *src_ifname,
				   uint32_t src_addr, uint32_t dst_addr,
				   int *metric)
{
	struct nl_msg *nlm;
	struct rtmsg rmsg;
	struct nl_lookup_arg arg;
	int msg_cnt;
	int err;
	int oif;

	oif = if_nametoindex(src_ifname);
	if (0 == oif) {
		return errno;
	}

	arg.nh_addr = 0;
	arg.oif = oif;
	arg.found = 0;
	arg.replied = 0;
	arg.unlsk = unlsk;
	arg.msg_count = msg_cnt = 0;

	memset(&rmsg, 0, sizeof(rmsg));
	rmsg.rtm_family = AF_INET;
	rmsg.rtm_dst_len = sizeof(dst_addr) * 8;
	rmsg.rtm_src_len = sizeof(src_addr) * 8;

	nlm = nlmsg_alloc_simple(RTM_GETROUTE, 0);
	nlmsg_append(nlm, &rmsg, sizeof(rmsg), NLMSG_ALIGNTO);
	nla_put_u32(nlm, RTA_DST, dst_addr);
	nla_put_u32(nlm, RTA_SRC, src_addr);

	err = rtnl_send_ack_disable(unlsk, nlm);
	nlmsg_free(nlm);
	if (err < 0) {
		usnic_err("Failed to send nl route message to kernel, "
			  "error %s\n", nl_geterror());
		return err;
	}

	err = nl_socket_modify_cb(unlsk->nlh, NL_CB_MSG_IN, NL_CB_CUSTOM,
				  rtnl_raw_parse_cb, &arg);
	if (err != 0) {
		usnic_err("Failed to setup callback function, error %s\n",
			  nl_geterror());
		return err;
	}

	while (!arg.replied) {
		err = nl_recvmsgs_default(unlsk->nlh);
		if (err < 0) {
			usnic_err("Failed to receive nl route message from "
				  "kernel, error %s\n", nl_geterror());
			return err;
		}

		/*
		 * The return value of nl_recvmsgs_default does not tell
		 * whether it returns because of successful read or socket
		 * timeout. So we compare msg count before and after the call
		 * to decide if no new message arrives. In such case,
		 * this function needs to terminate to prevent the caller from
		 * blocking forever.
		 * NL_CB_MSG_IN traps every received message, so
		 * there should be no premature exit.
		 */
		if (msg_cnt != arg.msg_count)
			msg_cnt = arg.msg_count;
		else
			break;
	}

	if (arg.found) {
		if (metric != NULL) {
			*metric = arg.metric;
		}
		return 0;
	} else {
		return -1;
	}
}
/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	const struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev->iflink &&
	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if (filter_mask & RTEXT_FILTER_BRVLAN) {
		struct nlattr *af;
		const struct net_port_vlans *pv;
		struct bridge_vlan_info vinfo;
		u16 vid;
		u16 pvid;

		if (port)
			pv = nbp_get_vlan_info(port);
		else
			pv = br_get_vlan_info(br);

		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
			goto done;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af)
			goto nla_put_failure;

		pvid = br_get_pvid(pv);
		for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
		     vid < BR_VLAN_BITMAP_LEN;
		     vid = find_next_bit(pv->vlan_bitmap,
					 BR_VLAN_BITMAP_LEN, vid + 1)) {
			vinfo.vid = vid;
			vinfo.flags = 0;
			if (vid == pvid)
				vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

			if (test_bit(vid, pv->untagged_bitmap))
				vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

			if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
				    sizeof(vinfo), &vinfo))
				goto nla_put_failure;
		}

		nla_nest_end(skb, af);
	}

done:
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
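/*
 * Illustrative only: a libnl-3 sketch of walking the IFLA_AF_SPEC nest that
 * br_fill_ifinfo() emits when RTEXT_FILTER_BRVLAN is requested, printing one
 * line per bridge_vlan_info entry. Assumes the caller already located the
 * IFLA_AF_SPEC attribute in an RTM_NEWLINK message.
 */
#include <stdio.h>
#include <netlink/attr.h>
#include <linux/if_bridge.h>

static void dump_bridge_vlans(struct nlattr *af_spec)
{
	struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, af_spec, rem) {
		struct bridge_vlan_info *vi;

		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;

		vi = nla_data(attr);
		printf("vlan %u%s%s\n", vi->vid,
		       (vi->flags & BRIDGE_VLAN_INFO_PVID) ? " pvid" : "",
		       (vi->flags & BRIDGE_VLAN_INFO_UNTAGGED) ? " untagged" : "");
	}
}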
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
				    u32 seq, int flags, struct net_device *dev)
{
	void *hdr;
	struct wpan_phy *phy;
	struct ieee802154_mlme_ops *ops;
	__le16 short_addr, pan_id;

	pr_debug("%s\n", __func__);

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
			  IEEE802154_LIST_IFACE);
	if (!hdr)
		goto out;

	ops = ieee802154_mlme_ops(dev);
	phy = dev->ieee802154_ptr->wpan_phy;
	BUG_ON(!phy);
	get_device(&phy->dev);

	short_addr = ops->get_short_addr(dev);
	pan_id = ops->get_pan_id(dev);

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
		    dev->dev_addr) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id))
		goto nla_put_failure;

	if (ops->get_mac_params) {
		struct ieee802154_mac_params params;

		rtnl_lock();
		ops->get_mac_params(dev, &params);
		rtnl_unlock();

		if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
			       params.transmit_power) ||
		    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
			       params.cca.mode) ||
		    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
				params.cca_ed_level) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
			       params.csma_retries) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
			       params.min_be) ||
		    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE,
			       params.max_be) ||
		    nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES,
			       params.frame_retries))
			goto nla_put_failure;
	}

	wpan_phy_put(phy);

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	wpan_phy_put(phy);
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
/**
 * virNetDevIPRouteAdd:
 * @ifname: the interface name
 * @addr: the IP network address (IPv4 or IPv6)
 * @prefix: number of 1 bits in the netmask
 * @gateway: via address for route (same family as @addr)
 * @metric: metric (priority) for the route
 *
 * Add a route for a network IP address to an interface. This function
 * *does not* remove any previously added IP static routes.
 *
 * Returns 0 in case of success or -1 in case of error.
 */
int
virNetDevIPRouteAdd(const char *ifname,
                    virSocketAddrPtr addr,
                    unsigned int prefix,
                    virSocketAddrPtr gateway,
                    unsigned int metric)
{
    int ret = -1;
    struct nl_msg *nlmsg = NULL;
    struct nlmsghdr *resp = NULL;
    unsigned int recvbuflen;
    unsigned int ifindex;
    struct rtmsg rtmsg;
    void *gatewayData = NULL;
    void *addrData = NULL;
    size_t addrDataLen;
    int errCode;
    virSocketAddr defaultAddr;
    virSocketAddrPtr actualAddr;
    char *toStr = NULL;
    char *viaStr = NULL;

    actualAddr = addr;

    /* If we have no valid network address, then use the default one */
    if (!addr || !VIR_SOCKET_ADDR_VALID(addr)) {
        int family = VIR_SOCKET_ADDR_FAMILY(gateway);

        VIR_DEBUG("computing default address");

        if (family == AF_INET) {
            if (virSocketAddrParseIPv4(&defaultAddr, VIR_SOCKET_ADDR_IPV4_ALL) < 0)
                goto cleanup;
        } else {
            if (virSocketAddrParseIPv6(&defaultAddr, VIR_SOCKET_ADDR_IPV6_ALL) < 0)
                goto cleanup;
        }

        actualAddr = &defaultAddr;
    }

    toStr = virSocketAddrFormat(actualAddr);
    viaStr = virSocketAddrFormat(gateway);
    VIR_DEBUG("Adding route %s/%d via %s", toStr, prefix, viaStr);

    if (virNetDevGetIPAddressBinary(actualAddr, &addrData, &addrDataLen) < 0 ||
        virNetDevGetIPAddressBinary(gateway, &gatewayData, &addrDataLen) < 0)
        goto cleanup;

    /* Get the interface index */
    if ((ifindex = if_nametoindex(ifname)) == 0)
        goto cleanup;

    if (!(nlmsg = nlmsg_alloc_simple(RTM_NEWROUTE,
                                     NLM_F_REQUEST | NLM_F_CREATE |
                                     NLM_F_EXCL))) {
        virReportOOMError();
        goto cleanup;
    }

    memset(&rtmsg, 0, sizeof(rtmsg));

    rtmsg.rtm_family = VIR_SOCKET_ADDR_FAMILY(gateway);
    rtmsg.rtm_table = RT_TABLE_MAIN;
    rtmsg.rtm_scope = RT_SCOPE_UNIVERSE;
    rtmsg.rtm_protocol = RTPROT_BOOT;
    rtmsg.rtm_type = RTN_UNICAST;
    rtmsg.rtm_dst_len = prefix;

    if (nlmsg_append(nlmsg, &rtmsg, sizeof(rtmsg), NLMSG_ALIGNTO) < 0)
        goto buffer_too_small;

    if (prefix > 0 && nla_put(nlmsg, RTA_DST, addrDataLen, addrData) < 0)
        goto buffer_too_small;

    if (nla_put(nlmsg, RTA_GATEWAY, addrDataLen, gatewayData) < 0)
        goto buffer_too_small;

    if (nla_put_u32(nlmsg, RTA_OIF, ifindex) < 0)
        goto buffer_too_small;

    if (metric > 0 && nla_put_u32(nlmsg, RTA_PRIORITY, metric) < 0)
        goto buffer_too_small;

    if (virNetlinkCommand(nlmsg, &resp, &recvbuflen, 0, 0, NETLINK_ROUTE, 0) < 0)
        goto cleanup;

    if ((errCode = virNetlinkGetErrorCode(resp, recvbuflen)) < 0) {
        virReportSystemError(errCode, _("Error adding route to %s"), ifname);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    VIR_FREE(toStr);
    VIR_FREE(viaStr);
    nlmsg_free(nlmsg);
    return ret;

 buffer_too_small:
    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("allocated netlink buffer is too small"));
    goto cleanup;
}
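/*
 * Illustrative usage only (not from the libvirt sources): adding a default
 * route via a gateway on "br0". Passing a NULL @addr with prefix 0 takes the
 * "use the default address" branch above; 192.0.2.1 is a documentation
 * (TEST-NET-1) address.
 */
static int
addDefaultRouteExample(void)
{
    virSocketAddr gw;

    if (virSocketAddrParseIPv4(&gw, "192.0.2.1") < 0)
        return -1;

    return virNetDevIPRouteAdd("br0", NULL, 0, &gw, 1);
}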
static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_sample *s = to_sample(a);
	struct tc_sample opt = {
		.index   = s->tcf_index,
		.refcnt  = refcount_read(&s->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&s->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&s->tcf_lock);
	opt.action = s->tcf_action;
	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &s->tcf_tm);
	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
		goto nla_put_failure;

	if (s->truncate)
		if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
			goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
		goto nla_put_failure;
	spin_unlock_bh(&s->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&s->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_sample_ops = {
	.kind	  = "sample",
	.type	  = TCA_ACT_SAMPLE,
	.owner	  = THIS_MODULE,
	.act	  = tcf_sample_act,
	.dump	  = tcf_sample_dump,
	.init	  = tcf_sample_init,
	.cleanup  = tcf_sample_cleanup,
	.walk	  = tcf_sample_walker,
	.lookup	  = tcf_sample_search,
	.size	  = sizeof(struct tcf_sample),
};

static __net_init int sample_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);

	return tc_action_net_init(tn, &act_sample_ops);
}

static void __net_exit sample_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, sample_net_id);
}

static struct pernet_operations sample_net_ops = {
	.init = sample_init_net,
	.exit_batch = sample_exit_net,
	.id   = &sample_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init sample_init_module(void)
{
	return tcf_register_action(&act_sample_ops, &sample_net_ops);
}

static void __exit sample_cleanup_module(void)
{
	tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}

module_init(sample_init_module);
module_exit(sample_cleanup_module);

MODULE_AUTHOR("Yotam Gigi <*****@*****.**>");
MODULE_DESCRIPTION("Packet sampling action");
MODULE_LICENSE("GPL v2");
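/*
 * Userspace counterpart, for orientation (command sketch, not taken from this
 * source): iproute2 configures and reads the attributes dumped above via
 * something like
 *
 *   tc filter add dev eth0 parent ffff: matchall \
 *       action sample rate 12 group 4 trunc 128
 *
 * where rate, group and trunc land in TCA_SAMPLE_RATE,
 * TCA_SAMPLE_PSAMPLE_GROUP and TCA_SAMPLE_TRUNC_SIZE respectively.
 */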
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
						struct wireless_dev *wdev,
						const void *data, int len)
{
	int err = 0;
	struct wl_priv *cfg = wiphy_priv(wiphy);
	gscan_results_cache_t *results, *iter;
	uint32 reply_len, complete = 1;
	int32 mem_needed, num_results_iter;
	wifi_gscan_result_t *ptr;
	uint16 num_scan_ids, num_results;
	struct sk_buff *skb;
	struct nlattr *scan_hdr, *complete_flag;

	err = dhd_dev_wait_batch_results_complete(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK)
		return -EBUSY;

	err = dhd_dev_pno_lock_access_batch_results(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK) {
		WL_ERR(("Can't obtain lock to access batch results %d\n", err));
		return -EBUSY;
	}

	results = dhd_dev_pno_get_gscan(wl_to_prmry_ndev(cfg),
					DHD_PNO_GET_BATCH_RESULTS, NULL,
					&reply_len);
	if (!results) {
		WL_ERR(("No results to send %d\n", err));
		err = wl_cfgvendor_send_cmd_reply(wiphy, wl_to_prmry_ndev(cfg),
						  results, 0);
		if (unlikely(err))
			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return err;
	}

	num_scan_ids = reply_len & 0xFFFF;
	num_results = (reply_len & 0xFFFF0000) >> 16;
	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
		     (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
		     VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;

	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
		complete = 0;
	}

	WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete,
		  mem_needed, (int)NLMSG_DEFAULT_SIZE));

	/* Alloc the SKB for vendor_event */
	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
	if (unlikely(!skb)) {
		WL_ERR(("skb alloc failed"));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return -ENOMEM;
	}

	iter = results;

	complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
				    sizeof(complete));
	if (unlikely(!complete_flag)) {
		/* nla_reserve() can fail; the flag attribute is dereferenced
		 * via nla_data() below, so bail out rather than crash.
		 */
		WL_ERR(("complete flag reserve failed"));
		kfree_skb(skb);
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return -ENOMEM;
	}

	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN +
				   VENDOR_REPLY_OVERHEAD);

	while (iter) {
		num_results_iter = (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) /
				   sizeof(wifi_gscan_result_t);
		if (num_results_iter <= 0 ||
		    ((iter->tot_count - iter->tot_consumed) > num_results_iter))
			break;

		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
		/* no more room? we are done then (for now) */
		if (scan_hdr == NULL) {
			complete = 0;
			break;
		}

		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);

		num_results_iter = iter->tot_count - iter->tot_consumed;

		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
		if (num_results_iter) {
			ptr = &iter->results[iter->tot_consumed];
			iter->tot_consumed += num_results_iter;
			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
				num_results_iter * sizeof(wifi_gscan_result_t),
				ptr);
		}
		nla_nest_end(skb, scan_hdr);
		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
			      (num_results_iter * sizeof(wifi_gscan_result_t));
		iter = iter->next;
	}

	memcpy(nla_data(complete_flag), &complete, sizeof(complete));
	dhd_dev_gscan_batch_cache_cleanup(wl_to_prmry_ndev(cfg));
	dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));

	return cfg80211_vendor_cmd_reply(skb);
}
/* Method to conditionally transmit a Netlink packet containing one or more
 * packets of information from the status FIFO
 */
static int tx_netlink_status(struct aes3_rx *rx)
{
	struct sk_buff *skb;
	void *msgHead;
	int returnValue = 0;
	uint32_t status;

	/* Make sure we have something to do */
	if (rx->statusReadyAes != AES_NEW_STATUS_READY &&
	    rx->statusReadyMeter != AES_NEW_STATUS_READY) {
		return returnValue;
	}

	/* We will send a packet, allocate a socket buffer */
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOMEM;

	/* Create the message headers. AES status messages get priority. */
	msgHead = genlmsg_put(skb, 0, rx->netlinkSequence++, &events_genl_family, 0,
			      (rx->statusReadyAes == AES_NEW_STATUS_READY ?
			       LABX_AES_EVENTS_C_AES_STATUS_PACKETS :
			       LABX_AES_EVENTS_C_METER_STATUS_PACKETS));
	if (msgHead == NULL) {
		returnValue = -ENOMEM;
		goto tx_failure;
	}

	/* Write the AES device's ID, properly translated for userspace, to identify the
	 * message source. The full ID is required since this driver can be used encapsulated
	 * within any number of more complex devices.
	 */
	returnValue = nla_put_u32(skb, LABX_AES_EVENTS_A_AES_DEVICE,
				  new_encode_dev(rx->deviceNode));
	if (returnValue != 0)
		goto tx_failure;

	/* AES interrupts get priority. Calling code must make sure to call us multiple
	 * times if there are multiple statuses to send out.
	 */
	if (rx->statusReadyAes == AES_NEW_STATUS_READY) {
		/* Clear the AES status flag */
		rx->statusReadyAes = AES_STATUS_IDLE;

		/* Read the AES rx status register and send it out */
		status = XIo_In32(REGISTER_ADDRESS(rx, AES_RX_STREAM_STATUS_REG));
		returnValue = nla_put_u32(skb, LABX_AES_EVENTS_A_AES_RX_STATUS, status);
		if (returnValue != 0)
			goto tx_failure;

		/* Read the AES tx status register and send it out */
		status = XIo_In32(REGISTER_ADDRESS(rx, AES_TX_STREAM_STATUS_REG));
		returnValue = nla_put_u32(skb, LABX_AES_EVENTS_A_AES_TX_STATUS, status);
		if (returnValue != 0)
			goto tx_failure;
	} else if (rx->statusReadyMeter == AES_NEW_STATUS_READY) {
		/* Clear the metering status flag */
		rx->statusReadyMeter = AES_STATUS_IDLE;

		/* Read the metering rx status register and send it out */
		status = XIo_In32(REGISTER_ADDRESS(rx, AES_RX_AUDIO_METER_REG));
		returnValue = nla_put_u32(skb, LABX_AES_EVENTS_A_METER_RX_STATUS, status);
		if (returnValue != 0)
			goto tx_failure;

		/* Read the metering tx status register and send it out */
		status = XIo_In32(REGISTER_ADDRESS(rx, AES_TX_AUDIO_METER_REG));
		returnValue = nla_put_u32(skb, LABX_AES_EVENTS_A_METER_TX_STATUS, status);
		if (returnValue != 0)
			goto tx_failure;
	}

	/* Finalize the message and multicast it */
	genlmsg_end(skb, msgHead);
	returnValue = genlmsg_multicast(skb, 0, labx_aes_mcast.id, GFP_ATOMIC);
	switch (returnValue) {
	case 0:
	case -ESRCH:
		/* Success or no process was listening, simply break */
		break;

	default:
		/* This is an actual error, print the return code */
		printk(KERN_INFO DRIVER_NAME ": Failure delivering multicast Netlink message: %d\n",
		       returnValue);
		goto tx_failure;
	}

tx_failure:
	return returnValue;
}
int ompi_btl_usnic_nl_ip_rt_lookup(struct usnic_rtnl_sk *unlsk,
				   const char *src_ifname,
				   uint32_t src_addr, uint32_t dst_addr,
				   int *metric)
{
	struct nl_msg *nlm;
	struct rtmsg rmsg;
	struct nl_lookup_arg arg;
	int msg_cnt;
	int err;
	int oif;

	oif = if_nametoindex(src_ifname);
	if (0 == oif) {
		return errno;
	}

	arg.nh_addr = 0;
	arg.oif = oif;
	arg.found = 0;
	arg.replied = 0;
	arg.unlsk = unlsk;
	arg.msg_count = msg_cnt = 0;

	memset(&rmsg, 0, sizeof(rmsg));
	rmsg.rtm_family = AF_INET;
	rmsg.rtm_dst_len = sizeof(dst_addr) * 8;
	rmsg.rtm_src_len = sizeof(src_addr) * 8;

	nlm = nlmsg_alloc_simple(RTM_GETROUTE, 0);
	nlmsg_append(nlm, &rmsg, sizeof(rmsg), NLMSG_ALIGNTO);
	nla_put_u32(nlm, RTA_DST, dst_addr);
	nla_put_u32(nlm, RTA_SRC, src_addr);

	err = rtnl_send_ack_disable(unlsk, nlm);
	nlmsg_free(nlm);
	if (err < 0) {
		usnic_err("Failed to send rtnl query %s\n", nl_geterror(err));
		return err;
	}

	err = nl_socket_modify_cb(unlsk->sock, NL_CB_MSG_IN, NL_CB_CUSTOM,
				  rtnl_raw_parse_cb, &arg);
	if (err != 0) {
		usnic_err("Failed to setup callback function, error %s\n",
			  nl_geterror(err));
		return err;
	}

	while (!arg.replied) {
		err = nl_recvmsgs_default(unlsk->sock);
		if (err < 0) {
			/* err will be returned as -NLE_AGAIN if the socket times out */
			usnic_err("Failed to receive rtnl query results %s\n",
				  nl_geterror(err));
			return err;
		}
	}

	if (arg.found) {
		if (metric != NULL) {
			*metric = arg.metric;
		}
		return 0;
	} else {
		return -1;
	}
}
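/*
 * Illustrative usage only (not from the OMPI sources): resolving the routing
 * metric from a local interface to a peer, assuming an already-initialized
 * usnic_rtnl_sk. The interface name and addresses are placeholders; both
 * addresses are passed in network byte order, as inet_pton() produces them.
 */
#include <stdio.h>
#include <arpa/inet.h>

static int lookup_metric_example(struct usnic_rtnl_sk *unlsk)
{
	struct in_addr src, dst;
	int metric;

	if (inet_pton(AF_INET, "10.0.0.1", &src) != 1 ||
	    inet_pton(AF_INET, "10.0.0.2", &dst) != 1)
		return -1;

	if (ompi_btl_usnic_nl_ip_rt_lookup(unlsk, "eth4", src.s_addr,
					   dst.s_addr, &metric) != 0)
		return -1;

	printf("route metric: %d\n", metric);
	return 0;
}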
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_skbedit_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind	= "skbedit",
	.type	= TCA_ACT_SKBEDIT,
	.owner	= THIS_MODULE,
	.act	= tcf_skbedit,
	.dump	= tcf_skbedit_dump,
	.init	= tcf_skbedit_init,
	.walk	= tcf_skbedit_walker,
	.lookup	= tcf_skbedit_search,
};

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops, SKBEDIT_TAB_MASK);
}

static void __net_exit skbedit_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <*****@*****.**>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
int nl80211_set_ap(struct nl80211_data *ctx)
{
	struct nl_msg *msg = NULL;
	int ret;
	u16 beacon_period = 100;
	u8 beacon_DTIM = 1;
	char *ssid_name = "TEST";
	u8 ssid_sz = strlen(ssid_name);
	int j;
	u8 rates[NL80211_MAX_SUPP_RATES];
	u8 nbBasics = (u8)(sizeof(BASIC_RATES) / sizeof(int));

	// format basic rates
	for (j = 0; j < nbBasics; j++)
		rates[j] = BASIC_RATE(BASIC_RATES[j]);

	// portion of the beacon before the TIM IE
	u8 *head = NULL;
	int head_sz = 0;
	// portion of the beacon after the TIM IE
	u8 *tail = NULL;
	int tail_sz = 0;

	// BEACON HEAD
	{
		u8 *pos = NULL;
		packet_element_t macheader = { 0 };
		packet_element_t ssid = { 0 };
		packet_element_t supp_rates = { 0 };	// renamed so it no longer shadows rates[]
		packet_element_t ds = { 0 };

		head_sz += hostapd_header_beacon(&macheader, ctx->macaddr, beacon_period);
		head_sz += hostapd_eid_ssid(&ssid, ssid_name);
		head_sz += hostapd_eid_supp_rates(&supp_rates);
		head_sz += hostapd_eid_ds_params(&ds);

		head = (u8 *)malloc(head_sz);
		pos = head;
		pos = packet_element_concatnfree(pos, macheader);
		pos = packet_element_concatnfree(pos, ssid);
		pos = packet_element_concatnfree(pos, supp_rates);
		pos = packet_element_concatnfree(pos, ds);

		fhexdump(stderr, "nl80211: Beacon head", head, head_sz);
	}

	// BEACON TAIL
	{
		u8 *pos = NULL;
		packet_element_t ext_rates = { 0 };
		//packet_element_t emilie = { 0 };

		tail_sz += hostapd_eid_ext_rates(&ext_rates);
		//tail_sz += hostapd_eid_emilie(&emilie);

		tail = (u8 *)malloc(tail_sz);
		pos = tail;
		pos = packet_element_concatnfree(pos, ext_rates);
		//pos = packet_element_concatnfree(pos, emilie);

		fhexdump(stderr, "nl80211: Beacon tail", tail, tail_sz);
	}

	// NL80211 BEACON SETTING
	msg = nlmsg_alloc();
	if (!msg)
		goto fail;
	if (!nl80211_cmd(ctx, msg, 0, NL80211_CMD_NEW_BEACON))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, ctx->ifindex))
		goto fail;
	if (nla_put(msg, NL80211_ATTR_BEACON_HEAD, head_sz, head))
		goto fail;
	if (nla_put(msg, NL80211_ATTR_BEACON_TAIL, tail_sz, tail))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_BEACON_INTERVAL, beacon_period))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_DTIM_PERIOD, beacon_DTIM))
		goto fail;
	if (nla_put(msg, NL80211_ATTR_SSID, ssid_sz, ssid_name))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_HIDDEN_SSID, NL80211_HIDDEN_SSID_NOT_IN_USE))
		goto fail;
	if (nla_put_flag(msg, NL80211_ATTR_PRIVACY))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_OPEN_SYSTEM))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_SMPS_MODE, NL80211_SMPS_OFF))
		goto fail;

	ret = send_and_recv_msgs(ctx, msg, NULL, NULL);
	if (ret) {
		fprintf(stderr, "nl80211: Beacon set failed: %d (%s)\n",
			ret, strerror(-ret));
		goto fail;
	}

	// NL80211 BSS ADVANCED SETTING
	msg = nlmsg_alloc();
	if (!msg)
		goto fail;
	if (!nl80211_cmd(ctx, msg, 0, NL80211_CMD_SET_BSS))
		goto fail;
	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, ctx->ifindex))
		goto fail;
	if (nla_put_u8(msg, NL80211_ATTR_BSS_CTS_PROT, 0))
		goto fail;
	if (nla_put_u8(msg, NL80211_ATTR_BSS_SHORT_PREAMBLE, 0))
		goto fail;
	if (nla_put_u8(msg, NL80211_ATTR_BSS_SHORT_SLOT_TIME, 1))
		goto fail;
	if (nla_put_u16(msg, NL80211_ATTR_BSS_HT_OPMODE, 0))
		goto fail;
	if (nla_put_u8(msg, NL80211_ATTR_AP_ISOLATE, 0))
		goto fail;
	if (nla_put(msg, NL80211_ATTR_BSS_BASIC_RATES, nbBasics, rates))
		goto fail;

	ret = send_and_recv_msgs(ctx, msg, NULL, NULL);
	if (ret) {
		fprintf(stderr, "nl80211: advanced settings failed: %d (%s)\n",
			ret, strerror(-ret));
		goto fail;
	}

	if (head)
		free(head);
	if (tail)
		free(tail);
	return 0;

fail:
	if (msg)
		nlmsg_free(msg);
	if (head)
		free(head);
	if (tail)
		free(tail);
	return -1;
}
int kg2_genl_set_output_timing(AVC_CMD_TIMING_PARAM *timing)
{
	int ret = 0;
	void *hdr;
	struct sk_buff *msg;

	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	// Create the Generic Netlink message header
	hdr = genlmsg_put(msg, g_daemon_pid, 0, &kg2_genl_family,
			  NLM_F_REQUEST, KG2_GENL_CMD_SET_OUTPUT_TIMING);
	if (hdr == NULL) {
		ret = -EMSGSIZE;
		goto err;
	}

	// Add one attribute per output timing field
	ret = nla_put_u16(msg, KG2_GENL_ATTR_HTOTAL, timing->HTotal);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_HACTIVE, timing->HActive);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_HFRONTPORCH, timing->HFrontPorch);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u8(msg, KG2_GENL_ATTR_HSYNCWIDTH, timing->HSyncWidth);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u32(msg, KG2_GENL_ATTR_HPOLARITY, timing->HPolarity);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_VTOTAL, timing->VTotal);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_VACTIVE, timing->VActive);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_VFRONTPORCH, timing->VFrontPorch);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u8(msg, KG2_GENL_ATTR_VSYNCWIDTH, timing->VSyncWidth);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u32(msg, KG2_GENL_ATTR_VPOLARITY, timing->VPolarity);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u32(msg, KG2_GENL_ATTR_ASPRATIO, timing->AspRatio);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u8(msg, KG2_GENL_ATTR_ISPROGRESSIVE, timing->IsProgressive);
	if (ret)
		goto err_nobufs;
	ret = nla_put_u16(msg, KG2_GENL_ATTR_REFRATE, timing->RefRate);
	if (ret)
		goto err_nobufs;

	// Finalize the message
	genlmsg_end(msg, hdr);

	// Send the message
	ret = genlmsg_unicast(&init_net, msg, g_daemon_pid);
	return ret;

err_nobufs:
	ret = -ENOBUFS;
err:
	nlmsg_free(msg);
	return ret;
}
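/*
 * Illustrative usage only (not from this driver's sources): pushing a
 * CEA-861 720p60 output timing through the helper above. The raster numbers
 * are the standard 1280x720p60 values; the polarity and aspect-ratio
 * encodings are assumptions, since their enum values are not shown in this
 * snippet.
 */
static int kg2_example_set_720p60(void)
{
	AVC_CMD_TIMING_PARAM t = {
		.HTotal        = 1650,
		.HActive       = 1280,
		.HFrontPorch   = 110,
		.HSyncWidth    = 40,
		.HPolarity     = 1,	/* assumed: 1 == active high */
		.VTotal        = 750,
		.VActive       = 720,
		.VFrontPorch   = 5,
		.VSyncWidth    = 5,
		.VPolarity     = 1,	/* assumed: 1 == active high */
		.AspRatio      = 1,	/* assumed: 1 == 16:9 */
		.IsProgressive = 1,
		.RefRate       = 60,
	};

	return kg2_genl_set_output_timing(&t);
}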
/**
 * virNetDevMacVLanCreate:
 *
 * @ifname: The name the interface is supposed to have; optional parameter
 * @type: The type of device, i.e., "macvtap", "macvlan"
 * @macaddress: The MAC address of the device
 * @srcdev: The name of the 'link' device
 * @macvlan_mode: The macvlan mode to use
 * @retry: Pointer to integer that will be '1' upon return if an interface
 *         with the same name already exists and it is worth trying again
 *         with a different name
 *
 * Create a macvtap device with the given properties.
 *
 * Returns 0 on success, -1 on fatal error.
 */
int
virNetDevMacVLanCreate(const char *ifname,
                       const char *type,
                       const unsigned char *macaddress,
                       const char *srcdev,
                       uint32_t macvlan_mode,
                       int *retry)
{
    int rc = -1;
    struct nlmsghdr *resp;
    struct nlmsgerr *err;
    struct ifinfomsg ifinfo = { .ifi_family = AF_UNSPEC };
    int ifindex;
    unsigned char *recvbuf = NULL;
    unsigned int recvbuflen;
    struct nl_msg *nl_msg;
    struct nlattr *linkinfo, *info_data;

    if (virNetDevGetIndex(srcdev, &ifindex) < 0)
        return -1;

    *retry = 0;

    nl_msg = nlmsg_alloc_simple(RTM_NEWLINK,
                                NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL);
    if (!nl_msg) {
        virReportOOMError();
        return -1;
    }

    if (nlmsg_append(nl_msg, &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO) < 0)
        goto buffer_too_small;

    if (nla_put_u32(nl_msg, IFLA_LINK, ifindex) < 0)
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_ADDRESS, VIR_MAC_BUFLEN, macaddress) < 0)
        goto buffer_too_small;

    if (ifname &&
        nla_put(nl_msg, IFLA_IFNAME, strlen(ifname) + 1, ifname) < 0)
        goto buffer_too_small;

    if (!(linkinfo = nla_nest_start(nl_msg, IFLA_LINKINFO)))
        goto buffer_too_small;

    if (nla_put(nl_msg, IFLA_INFO_KIND, strlen(type), type) < 0)
        goto buffer_too_small;

    if (macvlan_mode > 0) {
        if (!(info_data = nla_nest_start(nl_msg, IFLA_INFO_DATA)))
            goto buffer_too_small;

        if (nla_put(nl_msg, IFLA_MACVLAN_MODE,
                    sizeof(macvlan_mode), &macvlan_mode) < 0)
            goto buffer_too_small;

        nla_nest_end(nl_msg, info_data);
    }

    nla_nest_end(nl_msg, linkinfo);

    if (virNetlinkCommand(nl_msg, &recvbuf, &recvbuflen, 0) < 0)
        goto cleanup;

    if (recvbuflen < NLMSG_LENGTH(0) || recvbuf == NULL)
        goto malformed_resp;

    resp = (struct nlmsghdr *)recvbuf;

    switch (resp->nlmsg_type) {
    case NLMSG_ERROR:
        err = (struct nlmsgerr *)NLMSG_DATA(resp);
        if (resp->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
            goto malformed_resp;

        switch (err->error) {
        case 0:
            break;

        case -EEXIST:
            *retry = 1;
            goto cleanup;

        default:
            virReportSystemError(-err->error,
                                 _("error creating %s type of interface"),
                                 type);
            goto cleanup;
        }
        break;

    case NLMSG_DONE:
        break;

    default:
        goto malformed_resp;
    }

    rc = 0;

 cleanup:
    nlmsg_free(nl_msg);
    VIR_FREE(recvbuf);
    return rc;

 malformed_resp:
    virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("malformed netlink response message"));
    goto cleanup;

 buffer_too_small:
    virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s",
                   _("allocated netlink buffer is too small"));
    goto cleanup;
}
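/*
 * Illustrative usage only (not from the libvirt sources): probing for a free
 * device name with the @retry contract above. The name pattern and retry
 * bound are arbitrary choices for the example.
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>

static int
createMacvtapWithRetry(const unsigned char *mac, const char *srcdev)
{
    char ifname[IFNAMSIZ];
    int retry;
    size_t i;

    for (i = 0; i < 8; i++) {
        snprintf(ifname, sizeof(ifname), "macvtap%zu", i);
        if (virNetDevMacVLanCreate(ifname, "macvtap", mac, srcdev,
                                   MACVLAN_MODE_BRIDGE, &retry) == 0)
            return 0;
        if (!retry)  /* fatal error rather than a name collision */
            return -1;
    }
    return -1;
}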
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	 = q->stats.prob_drop + q->stats.forced_drop,
		.marked	 = q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	 = q->stats.pdrop,
		.other	 = q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		= "choke",
	.priv_size	= sizeof(struct choke_sched_data),
	.enqueue	= choke_enqueue,
	.dequeue	= choke_dequeue,
	.peek		= choke_peek_head,
	.init		= choke_init,
	.destroy	= choke_destroy,
	.reset		= choke_reset,
	.change		= choke_change,
	.dump		= choke_dump,
	.dump_stats	= choke_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");