static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	/* genlmsg_put() returns NULL on failure, not an ERR_PTR() */
	if (!hdr)
		return -EMSGSIZE;

	NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
	NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);

	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
	nla_nest_end(skb, nest);

	sk = tunnel->sock;
	if (!sk)
		goto out;

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
		NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
		NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM,
			   (sk->sk_no_check != UDP_CSUM_NOXMIT));
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
		NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
		NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
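/* All of the dump helpers collected here use the legacy NLA_PUT_*()
 * macro family (removed in Linux 3.5). Each macro hides a goto to a
 * function-local nla_put_failure label, which is why every function
 * below must define that label even when no explicit goto appears.
 * For reference, a sketch of how the pre-3.5 include/net/netlink.h
 * defined them (paraphrased, but faithful to the mechanism):
 */
#define NLA_PUT(skb, attrtype, attrlen, data) \
	do { \
		if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
			goto nla_put_failure; \
	} while (0)

#define NLA_PUT_TYPE(skb, type, attrtype, value) \
	do { \
		type __tmp = value; \
		NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
	} while (0)

#define NLA_PUT_BE32(skb, attrtype, value) \
	NLA_PUT_TYPE(skb, __be32, attrtype, value)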
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
	NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
	return 0;

nla_put_failure:
	return -1;
}
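/* When the NLA_PUT_*() macros were removed in Linux 3.5, helpers like
 * the one above were converted to test nla_put_be32() directly. A
 * minimal sketch of the converted form (same return convention):
 */
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
	    nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -1;
	return 0;
}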
static int
udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeouts = data;

	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
		     htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
		     htonl(timeouts[UDP_CT_REPLIED] / HZ));
	return 0;

nla_put_failure:
	return -ENOSPC;
}
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
	return 0;

nla_put_failure:
	return -1;
}
static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_meta *meta = nft_expr_priv(expr);

	NLA_PUT_BE32(skb, NFTA_META_KEY, htonl(meta->key));
	return 0;

nla_put_failure:
	return -1;
}
static int
icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeout = data;

	NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
	return 0;

nla_put_failure:
	return -ENOSPC;
}
static int
dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeouts = data;
	int i;

	for (i = CTA_TIMEOUT_DCCP_UNSPEC + 1; i < CTA_TIMEOUT_DCCP_MAX + 1; i++)
		NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
	return 0;

nla_put_failure:
	return -ENOSPC;
}
static int
dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
		     htonl(natseq->correction_pos));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
		     htonl(natseq->offset_before));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
		     htonl(natseq->offset_after));

	nla_nest_end(skb, nest_parms);
	return 0;

nla_put_failure:
	return -1;
}
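/* Note that on NLA_PUT_BE32() failure the nest above is left open;
 * that is safe here because the ctnetlink callers cancel the whole
 * message (nlmsg_cancel()), which discards the partial nest with it.
 * A standalone user of the nest API would unwind explicitly instead.
 * A minimal sketch, using hypothetical MY_ATTR_* attribute numbers:
 */
enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_NESTED,
	MY_ATTR_VALUE,
};

static int dump_nested_example(struct sk_buff *skb, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, MY_ATTR_NESTED | NLA_F_NESTED);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_be32(skb, MY_ATTR_VALUE, htonl(value))) {
		/* back out the partially written nest header */
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}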
static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_COUNTERS32_PACKETS,
		     htonl(ct->counters[dir].packets));
	NLA_PUT_BE32(skb, CTA_COUNTERS32_BYTES,
		     htonl(ct->counters[dir].bytes));

	nla_nest_end(skb, nest_count);
	return 0;

nla_put_failure:
	return -1;
}
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	/* do the subtraction in signed arithmetic: with unsigned math an
	 * already expired timer wraps to a huge positive value and the
	 * < 0 clamp below can never trigger */
	long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
	return 0;

nla_put_failure:
	return -1;
}
static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	frh->dst_len = rule4->dst_len;
	frh->src_len = rule4->src_len;
	frh->tos = rule4->tos;

	if (rule4->dst_len)
		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
	if (rule4->src_len)
		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rule4->tclassid)
		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
	return 0;

nla_put_failure:
	return -ENOBUFS;
}
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
		       const struct sk_buff *skb,
		       unsigned int data_len,
		       u_int8_t pf,
		       unsigned int hooknum,
		       const struct net_device *indev,
		       const struct net_device *outdev,
		       const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol = skb->protocol;
	pmsg.hook = hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is the physical output device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
		} else {
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);

		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER,
			skb->dev->hard_header_len, skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);

		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);
		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		/* open-coded nla_put(): write the attribute header, then
		 * copy the packet payload straight into the attribute */
		nla = (struct nlattr *)skb_put(inst->skb,
					       nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
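/* The open-coded payload attribute above exists so the function can
 * print a warning before bailing out when tailroom runs short. For
 * comparison, a sketch of the same step using nla_reserve(), which
 * writes the attribute header and reserves padded space in one call,
 * returning NULL when the skb lacks room. The helper name
 * nfulnl_put_payload() is hypothetical, for illustration only:
 */
static int nfulnl_put_payload(struct sk_buff *inst_skb,
			      const struct sk_buff *skb,
			      unsigned int data_len)
{
	struct nlattr *nla;

	nla = nla_reserve(inst_skb, NFULA_PAYLOAD, data_len);
	if (!nla)
		return -1;	/* no tailroom left */

	if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
		BUG();
	return 0;
}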
static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeouts = data;

	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
		     htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
		     htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
		     htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
		     htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
		     htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
		     htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
		     htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
		     htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
		     htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
		     htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
		     htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
	return 0;

nla_put_failure:
	return -ENOSPC;
}