int ieee802154_nl_disassoc_indic(struct net_device *dev,
		struct ieee802154_addr *addr, u8 reason)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
	if (!msg)
		return -ENOBUFS;

	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
			dev->dev_addr);

	if (addr->addr_type == IEEE802154_ADDR_LONG)
		NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
				addr->hwaddr);
	else
		NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
				addr->short_addr);
	NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
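/*
 * For reference: the functions in this collection rely on the (since removed)
 * NLA_PUT* convenience macros, which jump to a local nla_put_failure label
 * when the attribute does not fit into the message.  A minimal sketch of how
 * they were defined (the kernel and libnl carry near-identical macros; exact
 * definitions vary slightly across versions):
 */
#define NLA_PUT(skb, attrtype, attrlen, data)				\
	do {								\
		if (nla_put(skb, attrtype, attrlen, data) < 0)		\
			goto nla_put_failure;				\
	} while (0)

#define NLA_PUT_TYPE(skb, type, attrtype, value)			\
	do {								\
		type __tmp = value;					\
		NLA_PUT(skb, attrtype, sizeof(type), &__tmp);		\
	} while (0)

#define NLA_PUT_U32(skb, attrtype, value)				\
	NLA_PUT_TYPE(skb, u32, attrtype, value)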
int ieee802154_nl_assoc_indic(struct net_device *dev,
		struct ieee802154_addr *addr, u8 cap)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	if (addr->addr_type != IEEE802154_ADDR_LONG) {
		pr_err("%s: received non-long source address!\n", __func__);
		return -EINVAL;
	}

	msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
	if (!msg)
		return -ENOBUFS;

	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
			dev->dev_addr);
	NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
			addr->hwaddr);
	NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
int ieee802154_nl_scan_confirm(struct net_device *dev, u8 status, u8 scan_type,
		u32 unscanned, u8 page, u8 *edl/* 27 bytes */)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
	if (!msg)
		return -ENOBUFS;

	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
			dev->dev_addr);

	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
	NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
	NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
	NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);

	if (edl)
		NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
				   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt;
	struct tcf_t t;

	opt.index = d->tcf_index;
	opt.refcnt = d->tcf_refcnt - ref;
	opt.bindcnt = d->tcf_bindcnt - bind;
	opt.action = d->tcf_action;

	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
	if (d->flags & SKBEDIT_F_PRIORITY)
		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
			&d->priority);
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
			sizeof(d->queue_mapping), &d->queue_mapping);
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
				struct netlink_callback *cb, int flags)
{
	void *hdr;

	hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			  &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &nfc_genl_family);

	NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
	NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
	NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
	if (target->nfcid1_len > 0)
		NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
			target->nfcid1);
	if (target->sensb_res_len > 0)
		NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
			target->sensb_res);
	if (target->sensf_res_len > 0)
		NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
			target->sensf_res);

	return genlmsg_end(msg, hdr);

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
				   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
	if (d->flags & SKBEDIT_F_PRIORITY)
		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
			&d->priority);
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
			sizeof(d->queue_mapping), &d->queue_mapping);
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		= "skbedit",
	.hinfo		= &skbedit_hash_info,
	.type		= TCA_ACT_SKBEDIT,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_skbedit,
	.dump		= tcf_skbedit_dump,
	.cleanup	= tcf_skbedit_cleanup,
	.init		= tcf_skbedit_init,
	.walk		= tcf_generic_walker,
};

MODULE_AUTHOR("Alexander Duyck, <*****@*****.**>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask     = p->mask,
		.flags    = p->flags,
		.index    = p->tcf_index,
		.action   = p->tcf_action,
		.refcnt   = p->tcf_refcnt - ref,
		.bindcnt  = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_nat_ops = {
	.kind		= "nat",
	.hinfo		= &nat_hash_info,
	.type		= TCA_ACT_NAT,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_nat,
	.dump		= tcf_nat_dump,
	.cleanup	= tcf_nat_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_nat_init,
	.walk		= tcf_generic_walker
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);
static int meta_fill(struct rtnl_ematch *e, struct nl_msg *msg)
{
	struct meta_data *m = rtnl_ematch_data(e);
	struct tcf_meta_hdr hdr;

	if (!(m->left && m->right))
		return -NLE_MISSING_ATTR;

	memset(&hdr, 0, sizeof(hdr));
	hdr.left.kind = (m->left->mv_type << 12) & TCF_META_TYPE_MASK;
	hdr.left.kind |= m->left->mv_id & TCF_META_ID_MASK;
	hdr.left.shift = m->left->mv_shift;
	hdr.left.op = m->opnd;
	hdr.right.kind = (m->right->mv_type << 12) & TCF_META_TYPE_MASK;
	hdr.right.kind |= m->right->mv_id & TCF_META_ID_MASK;

	NLA_PUT(msg, TCA_EM_META_HDR, sizeof(hdr), &hdr);

	if (m->left->mv_len)
		NLA_PUT(msg, TCA_EM_META_LVALUE, m->left->mv_len,
			(m->left + 1));
	if (m->right->mv_len)
		NLA_PUT(msg, TCA_EM_META_RVALUE, m->right->mv_len,
			(m->right + 1));

	return 0;

nla_put_failure:
	return -NLE_NOMEM;
}
static inline int ctnetlink_dump_counters(struct sk_buff *skb,
					  const struct nf_conn *ct,
					  enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;
	__be32 tmp;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	tmp = htonl(ct->counters[dir].packets);
	NLA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp);

	tmp = htonl(ct->counters[dir].bytes);
	NLA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp);

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}
static int tbf_msg_fill(struct rtnl_tc *tc, void *data, struct nl_msg *msg)
{
	uint32_t rtab[RTNL_TC_RTABLE_SIZE], ptab[RTNL_TC_RTABLE_SIZE];
	struct tc_tbf_qopt opts;
	struct rtnl_tbf *tbf = data;
	int required = TBF_ATTR_RATE | TBF_ATTR_LIMIT;

	if ((tbf->qt_mask & required) != required)
		return -NLE_MISSING_ATTR;

	memset(&opts, 0, sizeof(opts));
	opts.limit = tbf->qt_limit;
	opts.buffer = tbf->qt_rate_txtime;

	rtnl_tc_build_rate_table(tc, &tbf->qt_rate, rtab);
	rtnl_rcopy_ratespec(&opts.rate, &tbf->qt_rate);

	if (tbf->qt_mask & TBF_ATTR_PEAKRATE) {
		opts.mtu = tbf->qt_peakrate_txtime;
		rtnl_tc_build_rate_table(tc, &tbf->qt_peakrate, ptab);
		rtnl_rcopy_ratespec(&opts.peakrate, &tbf->qt_peakrate);
	}

	NLA_PUT(msg, TCA_TBF_PARMS, sizeof(opts), &opts);
	NLA_PUT(msg, TCA_TBF_RTAB, sizeof(rtab), rtab);

	if (tbf->qt_mask & TBF_ATTR_PEAKRATE)
		NLA_PUT(msg, TCA_TBF_PTAB, sizeof(ptab), ptab);

	return 0;

nla_put_failure:
	return -NLE_MSGSIZE;
}
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq,
				int flags, struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_SESSION_GET);
	if (!hdr)	/* genlmsg_put() returns NULL on failure, not an ERR_PTR */
		return -EMSGSIZE;

	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
	NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
	NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
	if (session->mru)
		NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);

	if (session->ifname && session->ifname[0])
		NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
	if (session->cookie_len)
		NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len,
			&session->cookie[0]);
	if (session->peer_cookie_len)
		NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
			&session->peer_cookie[0]);
	NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
	NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
	NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
#ifdef CONFIG_XFRM
	if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
		NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
#endif
	if (session->reorder_timeout)
		NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT,
			      session->reorder_timeout);

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
		    session->stats.rx_seq_discards);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS,
		    session->stats.rx_oos_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
static int htb_class_msg_fill(struct rtnl_tc *tc, void *data,
			      struct nl_msg *msg)
{
	struct rtnl_htb_class *htb = data;
	uint32_t mtu, rtable[RTNL_TC_RTABLE_SIZE], ctable[RTNL_TC_RTABLE_SIZE];
	struct tc_htb_opt opts;
	int buffer, cbuffer;

	if (!htb || !(htb->ch_mask & SCH_HTB_HAS_RATE))
		BUG();

	memset(&opts, 0, sizeof(opts));

	/* if not set, zero (0) is used as priority */
	if (htb->ch_mask & SCH_HTB_HAS_PRIO)
		opts.prio = htb->ch_prio;

	mtu = rtnl_tc_get_mtu(tc);

	rtnl_tc_build_rate_table(tc, &htb->ch_rate, rtable);
	rtnl_rcopy_ratespec(&opts.rate, &htb->ch_rate);

	if (htb->ch_mask & SCH_HTB_HAS_CEIL) {
		rtnl_tc_build_rate_table(tc, &htb->ch_ceil, ctable);
		rtnl_rcopy_ratespec(&opts.ceil, &htb->ch_ceil);
	} else {
		/*
		 * If not set, configured rate is used as ceil, which implies
		 * no borrowing.
		 */
		memcpy(&opts.ceil, &opts.rate, sizeof(struct tc_ratespec));
	}

	if (htb->ch_mask & SCH_HTB_HAS_RBUFFER)
		buffer = htb->ch_rbuffer;
	else
		buffer = opts.rate.rate / nl_get_user_hz() + mtu; /* XXX */

	opts.buffer = rtnl_tc_calc_txtime(buffer, opts.rate.rate);

	if (htb->ch_mask & SCH_HTB_HAS_CBUFFER)
		cbuffer = htb->ch_cbuffer;
	else
		cbuffer = opts.ceil.rate / nl_get_user_hz() + mtu; /* XXX */

	opts.cbuffer = rtnl_tc_calc_txtime(cbuffer, opts.ceil.rate);

	if (htb->ch_mask & SCH_HTB_HAS_QUANTUM)
		opts.quantum = htb->ch_quantum;

	NLA_PUT(msg, TCA_HTB_PARMS, sizeof(opts), &opts);
	NLA_PUT(msg, TCA_HTB_RTAB, sizeof(rtable), &rtable);
	NLA_PUT(msg, TCA_HTB_CTAB, sizeof(ctable), &ctable);

	return 0;

nla_put_failure:
	return -NLE_MSGSIZE;
}
static int ipvs_nl_fill_service_attr(struct nl_msg *msg, ipvs_service_t *svc)
{
	struct nlattr *nl_service;
	struct ip_vs_flags flags = { .flags = svc->flags,
				     .mask = ~0 };

	nl_service = nla_nest_start(msg, IPVS_CMD_ATTR_SERVICE);
	if (!nl_service)
		return -1;

	NLA_PUT_U16(msg, IPVS_SVC_ATTR_AF, svc->af);

	if (svc->fwmark) {
		NLA_PUT_U32(msg, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
	} else {
		NLA_PUT_U16(msg, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
		NLA_PUT(msg, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr),
			&(svc->addr));
		NLA_PUT_U16(msg, IPVS_SVC_ATTR_PORT, svc->port);
	}

	NLA_PUT_STRING(msg, IPVS_SVC_ATTR_SCHED_NAME, svc->sched_name);
	if (svc->pe_name[0])
		NLA_PUT_STRING(msg, IPVS_SVC_ATTR_PE_NAME, svc->pe_name);
	NLA_PUT(msg, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
	NLA_PUT_U32(msg, IPVS_SVC_ATTR_TIMEOUT, svc->timeout);
	NLA_PUT_U32(msg, IPVS_SVC_ATTR_NETMASK, svc->netmask);

	nla_nest_end(msg, nl_service);

	return 0;

nla_put_failure:
	return -1;
}
#endif

int ipvs_add_service(ipvs_service_t *svc)
{
	ipvs_func = ipvs_add_service;
#ifdef LIBIPVS_USE_NL
	if (try_nl) {
		struct nl_msg *msg = ipvs_nl_message(IPVS_CMD_NEW_SERVICE, 0);
		if (!msg)
			return -1;
		if (ipvs_nl_fill_service_attr(msg, svc)) {
			nlmsg_free(msg);
			return -1;
		}
		return ipvs_nl_send_message(msg, ipvs_nl_noop_cb, NULL);
	}
#endif

	CHECK_COMPAT_SVC(svc, -1);
	return setsockopt(sockfd, IPPROTO_IP, IP_VS_SO_SET_ADD, (char *)svc,
			  sizeof(struct ip_vs_service_kern));
out_err:
	return -1;
}
static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.hinfo		= &csum_hash_info,
	.type		= TCA_ACT_CSUM,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.cleanup	= tcf_csum_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_csum_init,
	.walk		= tcf_generic_walker
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
				int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_defact *d = a->priv;
	struct tc_defact opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_simp_ops = {
	.kind		= "simple",
	.hinfo		= &simp_hash_info,
	.type		= TCA_ACT_SIMP,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_simp,
	.dump		= tcf_simp_dump,
	.cleanup	= tcf_simp_cleanup,
	.init		= tcf_simp_init,
	.walk		= tcf_generic_walker,
};

MODULE_AUTHOR("Jamal Hadi Salim(2005)");
MODULE_DESCRIPTION("Simple example action");
MODULE_LICENSE("GPL");

static int __init simp_init_module(void)
{
	int ret = tcf_register_action(&act_simp_ops);
	if (!ret)
		pr_info("Simple TC action Loaded\n");
	return ret;
}

static void __exit simp_cleanup_module(void)
{
	tcf_unregister_action(&act_simp_ops);
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = a->priv;
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.action  = m->tcf_action,
		.refcnt  = m->tcf_refcnt - ref,
		.bindcnt = m->tcf_bindcnt - bind,
		.eaction = m->tcfm_eaction,
		.ifindex = m->tcfm_ifindex,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
	NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		= "mirred",
	.hinfo		= &mirred_hash_info,
	.type		= TCA_ACT_MIRRED,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_mirred,
	.dump		= tcf_mirred_dump,
	.cleanup	= tcf_mirred_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_mirred_init,
	.walk		= tcf_generic_walker
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	printk("Mirror/redirect action on\n");
	return tcf_register_action(&act_mirred_ops);
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops);
}
int join_mesh(char *ifname, char *mesh_id, int mesh_id_len, char *vendor_ie,
	      int vendor_ie_len)
{
	struct nl_msg *msg;
	uint8_t cmd = NL80211_CMD_JOIN_MESH;
	int ret;
	char *pret;
	int ifindex = if_nametoindex(ifname);

	if (!mesh_id || !mesh_id_len)
		return -EINVAL;

	msg = nlmsg_alloc();
	if (!msg)
		return -ENOMEM;

	printf("o11s-pathseld: Starting mesh with mesh id = %s\n", mesh_id);

	pret = genlmsg_put(msg, 0, 0, genl_family_get_id(nlcfg.nl80211), 0, 0,
			   cmd, 0);
	if (pret == NULL)
		goto nla_put_failure;

	if (vendor_ie) {
		struct nlattr *container = nla_nest_start(msg,
				NL80211_ATTR_MESH_PARAMS);
		if (!container)
			return -ENOBUFS;

		NLA_PUT(msg, NL80211_MESHCONF_VENDOR_PATH_SEL_IE,
			vendor_ie_len, vendor_ie);
		NLA_PUT_U8(msg, NL80211_MESHCONF_ENABLE_VENDOR_PATH_SEL, 1);
		NLA_PUT_U8(msg, NL80211_MESHCONF_ENABLE_VENDOR_METRIC, 1);
		nla_nest_end(msg, container);
	}

	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, ifindex);
	NLA_PUT(msg, NL80211_ATTR_MESH_ID, mesh_id_len, mesh_id);

	ret = send_and_recv_msgs(msg, NULL, NULL);
	if (ret)
		printf("Mesh start failed: %d (%s)\n", ret, strerror(-ret));
	else
		printf("Mesh start succeeded. Yay!\n");

	return ret;

nla_put_failure:
	return -ENOBUFS;
}
static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct iwl_priv *priv = hw->priv;
	int idx, length;

	if (priv->testmode_sram.sram_readed) {
		idx = cb->args[4];
		if (idx >= priv->testmode_sram.num_chunks) {
			iwl_sram_cleanup(priv);
			return -ENOENT;
		}
		length = DUMP_CHUNK_SIZE;
		if (((idx + 1) == priv->testmode_sram.num_chunks) &&
		    (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
			length = priv->testmode_sram.buff_size %
				 DUMP_CHUNK_SIZE;

		NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
			priv->testmode_sram.buff_addr +
			(DUMP_CHUNK_SIZE * idx));
		idx++;
		cb->args[4] = idx;
		return 0;
	} else
		return -EFAULT;

nla_put_failure:
	return -ENOBUFS;
}
int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
		u8 status)
{
	struct sk_buff *msg;

	pr_debug("%s\n", __func__);

	msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
	if (!msg)
		return -ENOBUFS;

	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
			dev->dev_addr);

	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);

	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
static int handle_mpp_get(struct nl80211_state *state,
			  struct nl_msg *msg,
			  int argc, char **argv,
			  enum id_input id)
{
	unsigned char dst[ETH_ALEN];

	if (argc < 1)
		return 1;

	if (mac_addr_a2n(dst, argv[0])) {
		fprintf(stderr, "invalid mac address\n");
		return 2;
	}
	argc--;
	argv++;

	if (argc)
		return 1;

	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);

	register_handler(print_mpp_handler, NULL);

	return 0;
nla_put_failure:
	return -ENOBUFS;
}
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
	u32 seq, int flags, struct net_device *dev)
{
	void *hdr;
	struct wpan_phy *phy;

	pr_debug("%s\n", __func__);

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
		IEEE802154_LIST_IFACE);
	if (!hdr)
		goto out;

	phy = ieee802154_mlme_ops(dev)->get_phy(dev);
	BUG_ON(!phy);

	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);

	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
		dev->dev_addr);
	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
		ieee802154_mlme_ops(dev)->get_short_addr(dev));
	NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
		ieee802154_mlme_ops(dev)->get_pan_id(dev));
	wpan_phy_put(phy);

	return genlmsg_end(msg, hdr);

nla_put_failure:
	wpan_phy_put(phy);
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	int idx, length;

	if (priv->testmode_mem.read_in_progress) {
		idx = cb->args[4];
		if (idx >= priv->testmode_mem.num_chunks) {
			iwl_mem_cleanup(priv);
			return -ENOENT;
		}
		length = DUMP_CHUNK_SIZE;
		if (((idx + 1) == priv->testmode_mem.num_chunks) &&
		    (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
			length = priv->testmode_mem.buff_size %
				 DUMP_CHUNK_SIZE;

		NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
			priv->testmode_mem.buff_addr +
			(DUMP_CHUNK_SIZE * idx));
		idx++;
		cb->args[4] = idx;
		return 0;
	} else
		return -EFAULT;

nla_put_failure:
	return -ENOBUFS;
}
static int handle_interface_wds_peer(struct nl80211_state *state,
				     struct nl_cb *cb,
				     struct nl_msg *msg,
				     int argc, char **argv,
				     enum id_input id)
{
	unsigned char mac_addr[ETH_ALEN];

	if (argc < 1)
		return 1;

	if (mac_addr_a2n(mac_addr, argv[0])) {
		fprintf(stderr, "Invalid MAC address\n");
		return 2;
	}

	argc--;
	argv++;

	if (argc)
		return 1;

	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);

	return 0;
nla_put_failure:
	return -ENOBUFS;
}
static struct nl_msg *gen_msg(int iface, char *ssid, int chan)
{
	struct nl_msg *msg, *ssids, *freqs;

	msg = nlmsg_alloc();
	ssids = nlmsg_alloc();
	freqs = nlmsg_alloc();
	if (!msg || !ssids || !freqs) {
		fprintf(stderr, "Failed to allocate netlink message\n");
		if (msg)
			nlmsg_free(msg);
		if (ssids)
			nlmsg_free(ssids);
		if (freqs)
			nlmsg_free(freqs);
		return NULL;
	}

	genlmsg_put(msg, 0, 0, handle_id, 0, 0, NL80211_CMD_TRIGGER_SCAN, 0);
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, iface);

	NLA_PUT(ssids, 1, strlen(ssid), ssid);
	nla_put_nested(msg, NL80211_ATTR_SCAN_SSIDS, ssids);

	NLA_PUT_U32(freqs, 1, chan * 5 + 2407);
	nla_put_nested(msg, NL80211_ATTR_SCAN_FREQUENCIES, freqs);

	/* the nested attributes were copied into msg, so the temporary
	 * messages are no longer needed */
	nlmsg_free(ssids);
	nlmsg_free(freqs);

	return msg;

nla_put_failure:
	nlmsg_free(msg);
	nlmsg_free(ssids);
	nlmsg_free(freqs);
	return NULL;
}
/*
 * Fill a rtnetlink message with our event data.
 * Note that we propagate only the specified event and don't dump the
 * current wireless config. Dumping the wireless config is far too
 * expensive (for each parameter, the driver needs to query the hardware).
 */
static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
				 int type, char *event, int event_len)
{
	struct ifinfomsg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*r), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->ifi_family = AF_UNSPEC;
	r->__ifi_pad = 0;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = 0;	/* Wireless changes don't affect those flags */

	/* Add the wireless events in the netlink packet */
	NLA_PUT(skb, IFLA_WIRELESS, event_len, event);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int register_mgmt_frame(struct nl80211_state *state,
			       struct nl_msg *msg,
			       int argc, char **argv,
			       enum id_input id)
{
	unsigned int type;
	unsigned char *match;
	size_t match_len;
	int ret;

	if (argc < 2)
		return 1;

	ret = sscanf(argv[0], "%x", &type);
	if (ret != 1) {
		printf("invalid frame type: %s\n", argv[0]);
		return 2;
	}

	match = parse_hex(argv[1], &match_len);
	if (!match) {
		printf("invalid frame pattern: %s\n", argv[1]);
		return 2;
	}

	NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, type);
	NLA_PUT(msg, NL80211_ATTR_FRAME_MATCH, match_len, match);

	return 0;

nla_put_failure:
	return -ENOBUFS;
}
int reroute_path_selection_frames(char *ifname)
{
	struct nl_msg *msg;
	uint8_t cmd = NL80211_CMD_REGISTER_FRAME;
	int ret;
	char *pret;
	char action_code[2] = { 0x20, 0x00 };
	int ifindex = if_nametoindex(ifname);

	msg = nlmsg_alloc();
	if (!msg)
		return -ENOMEM;

	pret = genlmsg_put(msg, 0, 0, genl_family_get_id(nlcfg.nl80211), 0, 0,
			   cmd, 0);
	if (pret == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, ifindex);
	NLA_PUT(msg, NL80211_ATTR_FRAME_MATCH, sizeof(action_code),
		action_code);

	ret = send_and_recv_msgs(msg, receive_ps_frames, NULL);
	if (ret)
		printf("Registering for path selection frames failed: %d (%s)\n",
		       ret, strerror(-ret));
	else
		printf("Registering for path selection frames succeeded. Yay!\n");

	return ret;

nla_put_failure:
	return -ENOBUFS;
}
static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hw *hw = priv->hw;
	struct sk_buff *skb;
	void *data;
	int length;

	data = (void *)rxb_addr(rxb);
	length = get_event_length(rxb);

	if (!data || length == 0)
		return;

	skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
						GFP_ATOMIC);
	if (skb == NULL) {
		IWL_DEBUG_INFO(priv,
			       "Run out of memory for messages to user space ?\n");
		return;
	}
	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
	cfg80211_testmode_event(skb, GFP_ATOMIC);
	return;

nla_put_failure:
	kfree_skb(skb);
	IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
}
static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
				   struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct iwl_priv *priv = hw->priv;
	int idx, length;

	if (priv->testmode_trace.trace_enabled &&
	    priv->testmode_trace.trace_addr) {
		idx = cb->args[4];
		if (idx >= priv->testmode_trace.num_chunks)
			return -ENOENT;
		length = DUMP_CHUNK_SIZE;
		if (((idx + 1) == priv->testmode_trace.num_chunks) &&
		    (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
			length = priv->testmode_trace.buff_size %
				 DUMP_CHUNK_SIZE;

		NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
			priv->testmode_trace.trace_addr +
			(DUMP_CHUNK_SIZE * idx));
		idx++;
		cb->args[4] = idx;
		return 0;
	} else
		return -EFAULT;

nla_put_failure:
	return -ENOBUFS;
}
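/*
 * Note: the NLA_PUT* macros used above were later removed from the kernel;
 * current code checks the return value of the nla_put*() helpers directly
 * (usually still branching to a local error label).  A minimal sketch of the
 * equivalent modern pattern; example_fill() and its attribute choice are
 * hypothetical, shown only for illustration:
 */
static int example_fill(struct sk_buff *msg, struct net_device *dev)
{
	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}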