/*
 * Attach SCTP protocol info (state / verification tags) to a conntrack
 * netlink message under construction.
 *
 * @m:    message wrapper; m->entry tracks which top-level attrs exist
 * @info: values to encode; only the fields selected by @mask are read
 * @mask: SCTP_F_* bits selecting which leaf attributes to emit
 *
 * Returns 0 on success; -1 if CTA_PROTOINFO is already present, @mask is
 * empty, or the message buffer lacks space.
 */
int nfct_msg_set_sctpinfo(nfct_msg *m, const conn_sctpinfo *info, int mask)
{
	conn_entry *e = (conn_entry *)m->entry;
	nfct_msg_ctl *ctl = (nfct_msg_ctl *)m;
	struct nlattr **nla = e->nla;
	struct nlattr *proto, *sctp;
	size_t sz = 0;

	/* Refuse duplicate CTA_PROTOINFO and no-op masks. */
	if(nla[CTA_PROTOINFO] || ! mask)
		return -1;

	/* Sum the payload of the selected leaf attributes. */
	if(mask & SCTP_F_STATE)
		sz += nla_total_size(sizeof(__u8));
	if(mask & SCTP_F_VTAG_ORIG)
		sz += nla_total_size(sizeof(__u32));
	if(mask & SCTP_F_VTAG_REP)
		sz += nla_total_size(sizeof(__u32));
	if(sz) {
		/* Account for two nesting levels:
		 * CTA_PROTOINFO > CTA_PROTOINFO_SCTP > leaves. */
		sz = nla_total_size(nla_total_size(sz));
		if(sz < msg_free_space(ctl)) {
			proto = nla_nested_start(&ctl->ctx, CTA_PROTOINFO);
			sctp = nla_nested_start(&ctl->ctx, CTA_PROTOINFO_SCTP);
			if(mask & SCTP_F_STATE)
				nla_put_u8(&ctl->ctx, CTA_PROTOINFO_SCTP_STATE,
					   info->state);
			if(mask & SCTP_F_VTAG_ORIG)
				nla_put_be32(&ctl->ctx,
					     CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
					     htonl(info->vtag_orig));
			if(mask & SCTP_F_VTAG_REP)
				nla_put_be32(&ctl->ctx,
					     CTA_PROTOINFO_SCTP_VTAG_REPLY,
					     htonl(info->vtag_rep));
			nla_nested_end(sctp, ctl->ctx);
			nla_nested_end(proto, ctl->ctx);
			/* Record the outer nest so later setters see it. */
			nla[CTA_PROTOINFO] = proto;
			return 0;
		}
	}
	return -1;
}
/* Worst-case netlink attribute payload for dumping an ipip tunnel. */
static size_t ipip_get_size(const struct net_device *dev)
{
	size_t sz = 0;

	sz += nla_total_size(4);	/* IFLA_IPTUN_LINK */
	sz += nla_total_size(4);	/* IFLA_IPTUN_LOCAL */
	sz += nla_total_size(4);	/* IFLA_IPTUN_REMOTE */
	sz += nla_total_size(1);	/* IFLA_IPTUN_TTL */
	sz += nla_total_size(1);	/* IFLA_IPTUN_TOS */
	sz += nla_total_size(1);	/* IFLA_IPTUN_PMTUDISC */
	sz += nla_total_size(2);	/* IFLA_IPTUN_ENCAP_TYPE */
	sz += nla_total_size(2);	/* IFLA_IPTUN_ENCAP_FLAGS */
	sz += nla_total_size(2);	/* IFLA_IPTUN_ENCAP_SPORT */
	sz += nla_total_size(2);	/* IFLA_IPTUN_ENCAP_DPORT */

	return sz;
}
/* Worst-case netlink attribute payload for dumping a GRE tunnel. */
static size_t ipgre_get_size(const struct net_device *dev)
{
	size_t sz = 0;

	sz += nla_total_size(4);	/* IFLA_GRE_LINK */
	sz += nla_total_size(2);	/* IFLA_GRE_IFLAGS */
	sz += nla_total_size(2);	/* IFLA_GRE_OFLAGS */
	sz += nla_total_size(4);	/* IFLA_GRE_IKEY */
	sz += nla_total_size(4);	/* IFLA_GRE_OKEY */
	sz += nla_total_size(4);	/* IFLA_GRE_LOCAL */
	sz += nla_total_size(4);	/* IFLA_GRE_REMOTE */
	sz += nla_total_size(1);	/* IFLA_GRE_TTL */
	sz += nla_total_size(1);	/* IFLA_GRE_TOS */
	sz += nla_total_size(1);	/* IFLA_GRE_PMTUDISC */

	return sz;
}
/* Netlink size needed to encode an IPv6 lwtunnel encap state. */
static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int sz = nla_total_size_64bit(8);	/* LWTUNNEL_IP6_ID */

	sz += nla_total_size(16);		/* LWTUNNEL_IP6_DST */
	sz += nla_total_size(16);		/* LWTUNNEL_IP6_SRC */
	sz += nla_total_size(1);		/* LWTUNNEL_IP6_HOPLIMIT */
	sz += nla_total_size(1);		/* LWTUNNEL_IP6_TC */
	sz += nla_total_size(2);		/* LWTUNNEL_IP6_FLAGS */

	return sz;
}
/* Netlink size needed to encode an IPv4 lwtunnel encap state. */
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int sz = nla_total_size_64bit(8);	/* LWTUNNEL_IP_ID */

	sz += nla_total_size(4);		/* LWTUNNEL_IP_DST */
	sz += nla_total_size(4);		/* LWTUNNEL_IP_SRC */
	sz += nla_total_size(1);		/* LWTUNNEL_IP_TOS */
	sz += nla_total_size(1);		/* LWTUNNEL_IP_TTL */
	sz += nla_total_size(2);		/* LWTUNNEL_IP_FLAGS */

	return sz;
}
/* Upper-bound size of a bridge RTM_NEWLINK-style notification. */
static size_t br_nlmsg_size(void)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct ifinfomsg));

	sz += nla_total_size(IFNAMSIZ);		/* IFLA_IFNAME */
	sz += nla_total_size(MAX_ADDR_LEN);	/* IFLA_ADDRESS */
	sz += nla_total_size(4);		/* IFLA_MASTER */
	sz += nla_total_size(4);		/* IFLA_MTU */
	sz += nla_total_size(1);		/* IFLA_OPERSTATE */

	return sz;
}
/* Size of the netlink message used to install/remove the VRF FIB rule. */
static inline size_t vrf_fib_rule_nl_size(void)
{
	return NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
	       + nla_total_size(sizeof(u8))	/* FRA_L3MDEV */
	       + nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
}
/*
 * Broadcast a quota warning to userspace over generic netlink.
 *
 * @qid:      the (type, id) pair that crossed a quota limit
 * @dev:      device the filesystem lives on
 * @warntype: QUOTA_NL_* warning code
 *
 * Best-effort: failures are logged and the message is dropped.
 *
 * Fix: the family pointer argument was garbled to the invalid token
 * `"a_genl_family` (a mangled `&quota_genl_family`) in both genlmsg_put()
 * and genlmsg_multicast(); restored so the code compiles and targets the
 * quota genetlink family.
 */
void quota_send_warning(struct kqid qid, dev_t dev, const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
			  from_kqid_munged(&init_user_ns, qid));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
			  from_kuid_munged(&init_user_ns, current_uid()));
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
/**
 * wimax_msg_alloc - Create a new skb for sending a message to userspace
 *
 * @wimax_dev: WiMAX device descriptor
 * @pipe_name: "named pipe" the message will be sent to
 * @msg: pointer to the message data to send
 * @size: size of the message to send (in bytes), including the header.
 * @gfp_flags: flags for memory allocation.
 *
 * Returns: %0 if ok, negative errno code on error
 *
 * Description:
 *
 * Allocates an skb that will contain the message to send to user
 * space over the messaging pipe and initializes it, copying the
 * payload.
 *
 * Once this call is done, you can deliver it with
 * wimax_msg_send().
 *
 * IMPORTANT:
 *
 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
 * wimax_msg_send() depends on skb->data being placed at the
 * beginning of the user message.
 */
struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
				const char *pipe_name,
				const void *msg, size_t size,
				gfp_t gfp_flags)
{
	int result;
	struct device *dev = wimax_dev->net_dev->dev.parent;
	size_t msg_size;
	void *genl_msg;
	struct sk_buff *skb;

	/* Payload attr + ifindex attr + optional pipe-name attr. */
	msg_size = nla_total_size(size)
		+ nla_total_size(sizeof(u32))
		+ (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
	result = -ENOMEM;
	skb = genlmsg_new(msg_size, gfp_flags);
	if (skb == NULL)
		goto error_new;
	genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family, 0,
			       WIMAX_GNL_OP_MSG_TO_USER);
	if (genl_msg == NULL) {
		dev_err(dev, "no memory to create generic netlink message\n");
		goto error_genlmsg_put;
	}
	result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
			     wimax_dev->net_dev->ifindex);
	if (result < 0) {
		dev_err(dev, "no memory to add ifindex attribute\n");
		goto error_nla_put;
	}
	if (pipe_name) {
		result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
					pipe_name);
		if (result < 0) {
			dev_err(dev, "no memory to add pipe_name attribute\n");
			goto error_nla_put;
		}
	}
	/* Payload must be the last attribute: wimax_msg_send() relies on
	 * skb->data pointing at the user message (see IMPORTANT above). */
	result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
	if (result < 0) {
		dev_err(dev, "no memory to add payload (msg %p size %zu) in "
			"attribute: %d\n", msg, size, result);
		goto error_nla_put;
	}
	genlmsg_end(skb, genl_msg);
	return skb;

error_nla_put:
error_genlmsg_put:
error_new:
	/* Single cleanup path: free the (possibly NULL) skb. */
	nlmsg_free(skb);
	return ERR_PTR(result);
}
/*
 * Broadcast a quota warning to userspace (legacy, pre-kqid variant).
 *
 * @type:     quota type (user/group)
 * @id:       id that crossed a quota limit
 * @dev:      device the filesystem lives on
 * @warntype: QUOTA_NL_* warning code
 *
 * Best-effort: failures are logged and the message is dropped.
 *
 * Fix: the genlmsg_put() family argument was garbled to the invalid
 * token `"a_genl_family` (a mangled `&quota_genl_family`); restored so
 * the code compiles.
 */
void quota_send_warning(short type, unsigned int id, dev_t dev,
			const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* GFP_NOFS: called from filesystem write paths. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
/*
 * Send @data (@data_len bytes) to the userspace process @pid as a
 * MSG_CMD_NOTIFY generic-netlink message.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * Fixes:
 *  - the error path's nlmsg_free() was commented out, leaking the skb on
 *    every pre-unicast failure;
 *  - data was memcpy'd into a fixed buf[MAX_STR_LEN] stack buffer without
 *    a bounds check (overflow if data_len > MAX_STR_LEN) and the copy was
 *    never used — replaced by an explicit length validation;
 *  - genlmsg_unicast() consumes the skb even on failure, so that path
 *    must not free it (would be a double-free).
 */
static int genlnet_msg_send(int pid, char *data, uint32_t data_len)
{
	void *msg;
	int ret;
	size_t size;
	struct sk_buff *skb;
	struct genlmsghdr *genlhdr;
	void *reply;

	if (data == NULL || data_len > MAX_STR_LEN)
		return -1;

	/* Payload attribute + headroom for a zero-length attribute. */
	size = nla_total_size(data_len) + nla_total_size(0);
	skb = genlmsg_new(size, GFP_KERNEL);
	if (skb == NULL) {
		printk("%s %d\n", __func__, __LINE__);
		return -1;
	}

	msg = genlmsg_put(skb, 0, 0, &genlnet_family, 0, MSG_CMD_NOTIFY);
	if (msg == NULL) {
		printk("%s %d\n", __func__, __LINE__);
		goto err;
	}

	ret = nla_put(skb, MSG_CMD, data_len, data);
	if (ret < 0) {
		printk("%s %d, ret = %d\n", __func__, __LINE__, ret);
		goto err;
	}

	genlhdr = nlmsg_data(nlmsg_hdr(skb));
	reply = genlmsg_data(genlhdr);
	ret = genlmsg_end(skb, reply);
	if (ret < 0) {
		printk("%s %d, ret = %d\n", __func__, __LINE__, ret);
		goto err;
	}

	ret = genlmsg_unicast(&init_net, skb, pid);
	if (ret < 0) {
		/* genlmsg_unicast() has consumed the skb; don't free it. */
		printk("%s %d, ret = %d\n", __func__, __LINE__, ret);
		return -1;
	}
	return 0;

err:
	nlmsg_free(skb);
	return -1;
}
/* Upper-bound attribute size for one inet_diag socket dump entry. */
static size_t inet_sk_attr_size(void)
{
	size_t sz = 64;	/* slack for smaller auxiliary attributes */

	sz += nla_total_size(sizeof(struct tcp_info));
	sz += nla_total_size(1);	/* INET_DIAG_SHUTDOWN */
	sz += nla_total_size(1);	/* INET_DIAG_TOS */
	sz += nla_total_size(1);	/* INET_DIAG_TCLASS */
	sz += nla_total_size(4);	/* INET_DIAG_MARK */
	sz += nla_total_size(sizeof(struct inet_diag_meminfo));
	sz += nla_total_size(sizeof(struct inet_diag_msg));
	sz += nla_total_size(SK_MEMINFO_VARS * sizeof(u32));
	sz += nla_total_size(TCP_CA_NAME_MAX);
	sz += nla_total_size(sizeof(struct tcpvegas_info));

	return sz;
}
/* Worst-case netlink attribute payload for dumping a VTI tunnel. */
static size_t vti_get_size(const struct net_device *dev)
{
	size_t sz = 0;

	sz += nla_total_size(4);	/* IFLA_VTI_LINK */
	sz += nla_total_size(4);	/* IFLA_VTI_IKEY */
	sz += nla_total_size(4);	/* IFLA_VTI_OKEY */
	sz += nla_total_size(4);	/* IFLA_VTI_LOCAL */
	sz += nla_total_size(4);	/* IFLA_VTI_REMOTE */

	return sz;
}
/*
 * Set (or, with @name == NULL, clear) the conntrack helper name on a
 * message under construction, as a nested CTA_HELP > CTA_HELP_NAME.
 *
 * Returns 0 on success; -1 if CTA_HELP already exists or there is no
 * space left in the message buffer.
 *
 * Fix: the original body was unterminated — it lacked the final failure
 * `return -1;` and closing brace that the sibling setters
 * (__nfct_msg_set_zone, __nfct_msg_set_be32) end with.
 */
int nfct_msg_set_helper_name(nfct_msg *m, const char *name) /* NULL to remove existing helper */
{
	conn_entry *e = (conn_entry *)m->entry;
	nfct_msg_ctl *ctl = (nfct_msg_ctl *)m;
	struct nlattr **nla = e->nla;
	struct nlattr *help;
	size_t sz = (name ? strlen(name) : 0);

	/* Nested attr holding one NUL-terminated string. */
	sz = nla_total_size(nla_total_size(sz + 1));
	if(! nla[CTA_HELP] && sz < msg_free_space(ctl)) {
		help = nla_nested_start(&ctl->ctx, CTA_HELP);
		nla_put_string(&ctl->ctx, CTA_HELP_NAME, name ? : "");
		nla_nested_end(help, ctl->ctx);
		nla[CTA_HELP] = help;
		return 0;
	}
	return -1;
}
/* called from BPF program, therefore rcu_read_lock is held
 * bpf_check() verified that 'buf' pointer to BPF's stack
 * and it has 'len' bytes for us to read
 */
void bpf_channel_push_struct(struct bpf_context *pctx, u32 struct_id,
			     const void *buf, u32 len)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct plum *plum;
	struct nlattr *nla;

	/* Nothing to attach the upcall to without a packet. */
	if (unlikely(!ctx->skb))
		return;

	/* RCU-protected lookup; safe because caller holds rcu_read_lock. */
	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* allocate temp nlattr to pass it into ovs_dp_upcall */
	nla = kzalloc(nla_total_size(4 + len), GFP_ATOMIC);
	if (unlikely(!nla))
		return;

	/* Hand-build the attribute: 4-byte struct_id then the payload. */
	nla->nla_type = OVS_PACKET_ATTR_USERDATA;
	nla->nla_len = nla_attr_size(4 + len);
	memcpy(nla_data(nla), &struct_id, 4);
	memcpy(nla_data(nla) + 4, buf, len);

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = nla;
	upcall.portid = plum->upcall_pid;
	/* ovs_dp_upcall copies what it needs, so the nlattr is freed here. */
	ovs_dp_upcall(ctx->dp, NULL, &upcall);
	kfree(nla);
}
/* Full message size for a tc action dump: netlink + tcamsg headers,
 * the TCA_ACT_TAB nest, plus the caller-computed attribute size @sz. */
static size_t tcf_action_full_attrs_size(size_t sz)
{
	size_t total = NLMSG_HDRLEN;		/* struct nlmsghdr */

	total += sizeof(struct tcamsg);
	total += nla_total_size(0);		/* TCA_ACT_TAB nested */

	return total + sz;
}
static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate) { struct mpls_iptunnel_encap *tun_encap_info; tun_encap_info = mpls_lwtunnel_encap(lwtstate); return nla_total_size(tun_encap_info->labels * 4); }
static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); size_t size; size = nla_total_size(sizeof(u32)); size += sizeof(struct can_ctrlmode); size += nla_total_size(sizeof(u32)); size += sizeof(struct can_bittiming); size += sizeof(struct can_clock); if (priv->do_get_berr_counter) size += sizeof(struct can_berr_counter); if (priv->bittiming_const) size += sizeof(struct can_bittiming_const); return size; }
static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); size_t size; size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ size += sizeof(struct can_berr_counter); if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ size += sizeof(struct can_bittiming_const); return size; }
/* Netlink size needed to encode an IP tunnel encap state (with ports). */
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int sz = nla_total_size(8);	/* IP_TUN_ID */

	sz += nla_total_size(4);	/* IP_TUN_DST */
	sz += nla_total_size(4);	/* IP_TUN_SRC */
	sz += nla_total_size(1);	/* IP_TUN_TOS */
	sz += nla_total_size(1);	/* IP_TUN_TTL */
	sz += nla_total_size(2);	/* IP_TUN_SPORT */
	sz += nla_total_size(2);	/* IP_TUN_DPORT */
	sz += nla_total_size(2);	/* IP_TUN_FLAGS */

	return sz;
}
/* Worst-case netlink attribute payload for dumping one bonding slave. */
static size_t bond_get_slave_size(const struct net_device *bond_dev,
				  const struct net_device *slave_dev)
{
	size_t sz = 0;

	sz += nla_total_size(sizeof(u8));	/* IFLA_BOND_SLAVE_STATE */
	sz += nla_total_size(sizeof(u8));	/* IFLA_BOND_SLAVE_MII_STATUS */
	sz += nla_total_size(sizeof(u32));	/* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
	sz += nla_total_size(MAX_ADDR_LEN);	/* IFLA_BOND_SLAVE_PERM_HWADDR */
	sz += nla_total_size(sizeof(u16));	/* IFLA_BOND_SLAVE_QUEUE_ID */
	sz += nla_total_size(sizeof(u16));	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
	sz += nla_total_size(sizeof(u8));	/* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
	sz += nla_total_size(sizeof(u16));	/* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */

	return sz;
}
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len) { struct ath6kl *ar = wiphy_priv(wiphy); struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; int err, buf_len, reply_len; struct sk_buff *skb; void *buf; err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy); if (err) return err; if (!tb[ATH6KL_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) { case ATH6KL_TM_CMD_TCMD: if (!tb[ATH6KL_TM_ATTR_DATA]) return -EINVAL; buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]); buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]); ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len); return 0; break; case ATH6KL_TM_CMD_RX_REPORT: if (!tb[ATH6KL_TM_ATTR_DATA]) return -EINVAL; buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]); buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]); reply_len = nla_total_size(ATH6KL_TM_DATA_MAX_LEN); skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len); if (!skb) return -ENOMEM; err = ath6kl_tm_rx_report(ar, buf, buf_len, skb); if (err < 0) { kfree_skb(skb); return err; } return cfg80211_testmode_reply(skb); default: return -EOPNOTSUPP; } }
/* Upper-bound size of a bridge link notification, including the nested
 * per-port IFLA_PROTINFO block. */
static inline size_t br_nlmsg_size(void)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct ifinfomsg));

	sz += nla_total_size(IFNAMSIZ);			/* IFLA_IFNAME */
	sz += nla_total_size(MAX_ADDR_LEN);		/* IFLA_ADDRESS */
	sz += nla_total_size(4);			/* IFLA_MASTER */
	sz += nla_total_size(4);			/* IFLA_MTU */
	sz += nla_total_size(4);			/* IFLA_LINK */
	sz += nla_total_size(1);			/* IFLA_OPERSTATE */
	sz += nla_total_size(br_port_info_size());	/* IFLA_PROTINFO */

	return sz;
}
/* Append a CTA_ZONE (big-endian u16) attribute to the message.
 * Returns 0 on success, -1 if the attr already exists or space is short. */
int __nfct_msg_set_zone(nfct_msg *m, __u16 zone)
{
	nfct_msg_ctl *ctl = (nfct_msg_ctl *)m;
	conn_entry *entry = (conn_entry *)m->entry;
	struct nlattr **attrs = entry->nla;

	if(attrs[CTA_ZONE])
		return -1;
	if((size_t)nla_total_size(sizeof(__u16)) >= msg_free_space(ctl))
		return -1;

	/* Record where the attribute lands before the put advances ctx. */
	attrs[CTA_ZONE] = (struct nlattr *)ctl->ctx;
	nla_put_be16(&ctl->ctx, CTA_ZONE, htons(zone));
	return 0;
}
/* Append attribute @t as a big-endian u32 to the message.
 * Returns 0 on success, -1 if the attr already exists or space is short. */
int __nfct_msg_set_be32(nfct_msg *m, int t, __u32 val)
{
	nfct_msg_ctl *ctl = (nfct_msg_ctl *)m;
	conn_entry *entry = (conn_entry *)m->entry;
	struct nlattr **attrs = entry->nla;

	if(attrs[t])
		return -1;
	if((size_t)nla_total_size(sizeof(__u32)) >= msg_free_space(ctl))
		return -1;

	/* Record where the attribute lands before the put advances ctx. */
	attrs[t] = (struct nlattr *)ctl->ctx;
	nla_put_be32(&ctl->ctx, t, htonl(val));
	return 0;
}
/* Upper-bound size of a bridge link notification message. */
static inline size_t br_nlmsg_size(void)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct ifinfomsg));

	sz += nla_total_size(IFNAMSIZ);
	sz += nla_total_size(MAX_ADDR_LEN);
	sz += nla_total_size(4);
	sz += nla_total_size(4);
	sz += nla_total_size(4);
	sz += nla_total_size(1);
	sz += nla_total_size(1);

	return sz;
}
/* Emit a DLM_CMD_TIMEOUT netlink notification for a lock request that hit
 * its timeout. Best-effort: allocation/build failures drop the message. */
void dlm_timeout_warn(struct dlm_lkb *lkb)
{
	struct sk_buff *uninitialized_var(send_skb);
	struct dlm_lock_data *data;
	size_t size;
	int rv;

	/* NOTE(review): the extra nla_total_size(0) presumably reserves
	 * headroom beyond the lock-data attribute — confirm against
	 * prepare_data()'s expectations. */
	size = nla_total_size(sizeof(struct dlm_lock_data)) +
	       nla_total_size(0);

	rv = prepare_data(DLM_CMD_TIMEOUT, &send_skb, size);
	if (rv < 0)
		return;

	data = mk_data(send_skb);
	if (!data) {
		/* Could not reserve the attribute; discard the skb. */
		nlmsg_free(send_skb);
		return;
	}

	fill_data(data, lkb);
	send_data(send_skb);
}
/* Put attribute @attrtype with 64-bit-safe padding: uses the padded size
 * when the skb requires 64-bit alignment, the plain size otherwise.
 * Returns 0 on success or -EMSGSIZE if the skb lacks tailroom. */
int rpl_nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
		      const void *data, int padattr)
{
	size_t needed = nla_need_padding_for_64bit(skb) ?
			nla_total_size_64bit(attrlen) :
			nla_total_size(attrlen);

	if (unlikely(skb_tailroom(skb) < needed))
		return -EMSGSIZE;

	__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
	return 0;
}
/* Upper-bound attribute size for one inet_diag socket dump entry,
 * including any protocol handler's auxiliary attributes. */
static size_t inet_sk_attr_size(struct sock *sk,
				const struct inet_diag_req_v2 *req,
				bool net_admin)
{
	const struct inet_diag_handler *handler =
		inet_diag_table[req->sdiag_protocol];
	size_t sz;

	/* Protocol-specific extra attributes, if the handler reports any. */
	sz = (handler && handler->idiag_get_aux_size) ?
		handler->idiag_get_aux_size(sk, net_admin) : 0;

	sz += 64;	/* slack for smaller auxiliary attributes */
	sz += nla_total_size(sizeof(struct tcp_info));
	sz += nla_total_size(1);	/* INET_DIAG_SHUTDOWN */
	sz += nla_total_size(1);	/* INET_DIAG_TOS */
	sz += nla_total_size(1);	/* INET_DIAG_TCLASS */
	sz += nla_total_size(4);	/* INET_DIAG_MARK */
	sz += nla_total_size(sizeof(struct inet_diag_meminfo));
	sz += nla_total_size(sizeof(struct inet_diag_msg));
	sz += nla_total_size(SK_MEMINFO_VARS * sizeof(u32));
	sz += nla_total_size(TCP_CA_NAME_MAX);
	sz += nla_total_size(sizeof(struct tcpvegas_info));

	return sz;
}
/* Size of the nested per-port bridge attributes (IFLA_BRPORT_*). */
static inline size_t br_port_info_size(void)
{
	size_t sz = 0;

	sz += nla_total_size(1);	/* IFLA_BRPORT_STATE */
	sz += nla_total_size(2);	/* IFLA_BRPORT_PRIORITY */
	sz += nla_total_size(4);	/* IFLA_BRPORT_COST */
	sz += nla_total_size(1);	/* IFLA_BRPORT_MODE */
	sz += nla_total_size(1);	/* IFLA_BRPORT_GUARD */
	sz += nla_total_size(1);	/* IFLA_BRPORT_PROTECT */

	return sz;
}