void quota_send_warning(struct kqid qid, dev_t dev, const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
			  from_kqid_munged(&init_user_ns, qid));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
			  from_kuid_munged(&init_user_ns, current_uid()));
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
void quota_send_warning(short type, unsigned int id, dev_t dev,
			const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
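Minimal caller sketch for the kqid-based variant above, assuming a filesystem quota path that has just detected a crossed block soft limit. The wrapper name is hypothetical; quota_send_warning(), QUOTA_NL_BSOFTWARN, dq_id, dq_sb and s_dev are existing kernel symbols.

/* Hypothetical wrapper for illustration only; not part of the code above. */
static void example_report_block_softlimit(struct dquot *dquot)
{
	quota_send_warning(dquot->dq_id, dquot->dq_sb->s_dev,
			   QUOTA_NL_BSOFTWARN);
}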
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
				struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_SESSION_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
	nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
	nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug);
	nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
	nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu);
	if (session->mru)
		nla_put_u16(skb, L2TP_ATTR_MRU, session->mru);

	if (session->ifname && session->ifname[0])
		nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname);
	if (session->cookie_len)
		nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
			&session->cookie[0]);
	if (session->peer_cookie_len)
		nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
			&session->peer_cookie[0]);
	nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
	nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
	nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
#ifdef CONFIG_XFRM
	if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
		nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1);
#endif
	if (session->reorder_timeout)
		nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
			      session->reorder_timeout);

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
	nla_put_u64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
	nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
	nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
	nla_put_u64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
	nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
		    session->stats.rx_seq_discards);
	nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
		    session->stats.rx_oos_packets);
	nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
	nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
	nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
	nla_put_u64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
	nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
	nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
	nla_put_u64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
	nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
	nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
	nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
		nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
		nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
			   (sk->sk_no_check != UDP_CSUM_NOXMIT));
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
		nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
		nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
static int kfm_genl_fill_mempage(struct sk_buff *skb,
				 struct kfm_mempage_u *umempage)
{
	KFM_DBG(7, "ADDR:0x%016llx, PFN:0x%016llx, FLAGS:0x%016llx",
		umempage->addr, umempage->pfn, umempage->flags);

	if (nla_put_u64(skb, KFM_MEMPAGE_ATTR_ADDR, umempage->addr))
		goto nla_put_failure;
	if (nla_put_u64(skb, KFM_MEMPAGE_ATTR_PFN, umempage->pfn))
		goto nla_put_failure;
	if (nla_put_u64(skb, KFM_MEMPAGE_ATTR_FLAGS, umempage->flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (q->peak_present &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int ip6_tun_fill_encap_info(struct sk_buff *skb,
				   struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
	    nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}
static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
	    nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}
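For context, a receive-side parser would pair these LWTUNNEL_IP_* attributes with an nla_policy declaring the same types the fill routine emits (u64 for the ID, u32 for the be32 addresses, u8 for TOS/TTL, u16 for the flags). The table below is a sketch inferred from those types, not code taken from this section; the array name is illustrative.

static const struct nla_policy example_ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
};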
static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
	    nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
	    nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
	    nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
	    nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
	    nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
	    nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
	    nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}
static int emigration_failed_create_request(struct sk_buff *skb, void *params)
{
	int ret = 0;
	struct emigration_failed_params *emigration_failed_params = params;

	ret = nla_put_u32(skb, DIRECTOR_A_PID, emigration_failed_params->pid);
	if (ret != 0)
		goto failure;

	ret = nla_put_string(skb, DIRECTOR_A_NAME,
			     emigration_failed_params->name);
	if (ret != 0)
		goto failure;

	ret = nla_put_u64(skb, DIRECTOR_A_JIFFIES,
			  emigration_failed_params->jiffies);
	if (ret != 0)
		goto failure;

failure:
	return ret;
}
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
{
	return nla_put_u64(msg, type, swab64((__force u64)hwaddr));
}
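A hedged companion sketch for reading the attribute back: nla_get_u64() is the standard netlink accessor, and swab64() undoes the byte swap applied on the put side. The helper name is illustrative, not from the source.

static __le64 example_nla_get_hwaddr(const struct nlattr *nla)
{
	/* Reverse of nla_put_hwaddr(): swap back to little-endian storage. */
	return (__force __le64)swab64(nla_get_u64(nla));
}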
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
			    nla_get_u64(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
	kfree_skb(nskb);
	return err;
}
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = NULL;
#endif
	struct l2tp_stats stats;
	unsigned int start;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	do {
		start = u64_stats_fetch_begin(&tunnel->stats.syncp);
		stats.tx_packets = tunnel->stats.tx_packets;
		stats.tx_bytes = tunnel->stats.tx_bytes;
		stats.tx_errors = tunnel->stats.tx_errors;
		stats.rx_packets = tunnel->stats.rx_packets;
		stats.rx_bytes = tunnel->stats.rx_bytes;
		stats.rx_errors = tunnel->stats.rx_errors;
		stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
		stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, stats.rx_seq_discards) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, stats.rx_oos_packets) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		np = inet6_sk(sk);
#endif

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
			       (sk->sk_no_check != UDP_CSUM_NOXMIT)))
			goto nla_put_failure;
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (np) {
			if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
				    &np->saddr) ||
			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
				    &np->daddr))
				goto nla_put_failure;
		} else
#endif
		if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
		    nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
			goto nla_put_failure;
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel, u8 cmd)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = NULL;
#endif

	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
		goto nla_put_failure;

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
			atomic_long_read(&tunnel->stats.tx_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
			atomic_long_read(&tunnel->stats.tx_bytes)) ||
	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
			atomic_long_read(&tunnel->stats.tx_errors)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
			atomic_long_read(&tunnel->stats.rx_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
			atomic_long_read(&tunnel->stats.rx_bytes)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
			atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
			atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
			atomic_long_read(&tunnel->stats.rx_errors)))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		np = inet6_sk(sk);
#endif

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
			goto nla_put_failure;
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (np) {
			if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR,
					     &np->saddr) ||
			    nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR,
					     &sk->sk_v6_daddr))
				goto nla_put_failure;
		} else
#endif
		if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
		    nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
			goto nla_put_failure;
		break;
	}

out:
	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
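A sketch of how a genetlink dump callback might drive the fill routine above, assuming some tunnel-iteration helper is available. The callback and lookup names are hypothetical; NETLINK_CB(), cb->nlh->nlmsg_seq, NLM_F_MULTI and L2TP_CMD_TUNNEL_GET are the usual dump-path ingredients, and the refcounting a real iterator would need is omitted.

static int example_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int ti = cb->args[0];
	struct l2tp_tunnel *tunnel;
	struct net *net = sock_net(skb->sk);

	for (;;) {
		/* example_tunnel_nth() stands in for whatever lookup the
		 * surrounding code provides. */
		tunnel = example_tunnel_nth(net, ti);
		if (!tunnel)
			break;

		if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI,
					tunnel, L2TP_CMD_TUNNEL_GET) < 0)
			break;
		ti++;
	}

	cb->args[0] = ti;
	return skb->len;
}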