/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *top_iph;
        int dsfield;

        skb_set_network_header(skb, -x->props.header_len);
        skb->mac_header = skb->network_header +
                          offsetof(struct ipv6hdr, nexthdr);
        skb->transport_header = skb->network_header + sizeof(*top_iph);
        top_iph = ipv6_hdr(skb);

        top_iph->version = 6;

        memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
               sizeof(top_iph->flow_lbl));
        top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

        dsfield = XFRM_MODE_SKB_CB(skb)->tos;
        dsfield = INET_ECN_encapsulate(dsfield, dsfield);
        if (x->props.flags & XFRM_STATE_NOECN)
                dsfield &= ~INET_ECN_MASK;
        ipv6_change_dsfield(top_iph, 0, dsfield);
        top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
        ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
        ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
        return 0;
}
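/* Every snippet in this collection ends up in ip6_dst_hoplimit() when no
 * explicit hop limit is configured. A minimal sketch of that helper,
 * assuming the net/ipv6/output_core.c layout of this era: the route metric
 * wins, then the egress device's inet6_dev config, then the per-namespace
 * "all" default.
 */
int ip6_dst_hoplimit(struct dst_entry *dst)
{
        int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

        if (hoplimit == 0) {
                struct net_device *dev = dst->dev;
                struct inet6_dev *idev;

                rcu_read_lock();
                idev = __in6_dev_get(dev);
                if (idev)
                        hoplimit = idev->cnf.hop_limit;
                else
                        hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
                rcu_read_unlock();
        }
        return hoplimit;
}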
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt)
{
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        int tclass = 0;
        u32 mtu;

        if (opt) {
                unsigned int head_room;

                /* First: exthdrs may take lots of space (~8K for now)
                   MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (skb2 == NULL) {
                                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        kfree_skb(skb);
                        skb = skb2;
                        skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);

        /*
         *      Fill in the IPv6 header
         */
        if (np) {
                tclass = np->tclass;
                hlimit = np->hop_limit;
        }
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
        ipv6_addr_copy(&hdr->daddr, first_hop);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
                               dst->dev, dst_output);
        }

        if (net_ratelimit())
                printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}
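/* The open-coded store above -- htonl(0x60000000 | (tclass << 20)) OR'd
 * with the flow label -- packs version, traffic class, and flow label into
 * the first 32 bits of the IPv6 header. Later kernels (see the ip6_xmit
 * variant further down) hide the same store behind a helper; a sketch of
 * ip6_flow_hdr() as found in include/net/ipv6.h:
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
                                __be32 flowlabel)
{
        /* version 6, 8-bit traffic class, 20-bit flow label */
        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}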
static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen,
                              unsigned int flags)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        int len;
        int val;

        if (ip6_mroute_opt(optname))
                return ip6_mroute_getsockopt(sk, optname, optval, optlen);

        if (get_user(len, optlen))
                return -EFAULT;
        switch (optname) {
        case IPV6_ADDRFORM:
                if (sk->sk_protocol != IPPROTO_UDP &&
                    sk->sk_protocol != IPPROTO_UDPLITE &&
                    sk->sk_protocol != IPPROTO_TCP)
                        return -ENOPROTOOPT;
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -ENOTCONN;
                val = sk->sk_family;
                break;
        case MCAST_MSFILTER:
        {
                struct group_filter gsf;
                int err;

                if (len < GROUP_FILTER_SIZE(0))
                        return -EINVAL;
                if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0)))
                        return -EFAULT;
                if (gsf.gf_group.ss_family != AF_INET6)
                        return -EADDRNOTAVAIL;
                lock_sock(sk);
                err = ip6_mc_msfget(sk, &gsf,
                        (struct group_filter __user *)optval, optlen);
                release_sock(sk);
                return err;
        }
        case IPV6_2292PKTOPTIONS:
        {
                struct msghdr msg;
                struct sk_buff *skb;

                if (sk->sk_type != SOCK_STREAM)
                        return -ENOPROTOOPT;

                msg.msg_control = (void __force_kernel *)optval;
                msg.msg_controllen = len;
                msg.msg_flags = flags;

                lock_sock(sk);
                skb = np->pktoptions;
                if (skb)
                        atomic_inc(&skb->users);
                release_sock(sk);

                if (skb) {
                        int err = ip6_datagram_recv_ctl(sk, &msg, skb);
                        kfree_skb(skb);
                        if (err)
                                return err;
                } else {
                        if (np->rxopt.bits.rxinfo) {
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
                                src_info.ipi6_addr = np->mcast_oif ? np->daddr :
                                        np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO,
                                         sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
                                int hlim = np->mcast_hops;
                                put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT,
                                         sizeof(hlim), &hlim);
                        }
                        if (np->rxopt.bits.rxtclass) {
                                int tclass = np->rcv_tclass;
                                put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS,
                                         sizeof(tclass), &tclass);
                        }
                        if (np->rxopt.bits.rxoinfo) {
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
                                src_info.ipi6_addr = np->mcast_oif ? np->daddr :
                                        np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO,
                                         sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
                                int hlim = np->mcast_hops;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292HOPLIMIT,
                                         sizeof(hlim), &hlim);
                        }
                }
                len -= msg.msg_controllen;
                return put_user(len, optlen);
        }
        case IPV6_MTU:
        {
                struct dst_entry *dst;

                val = 0;
                rcu_read_lock();
                dst = __sk_dst_get(sk);
                if (dst)
                        val = dst_mtu(dst);
                rcu_read_unlock();
                if (!val)
                        return -ENOTCONN;
                break;
        }
        case IPV6_V6ONLY:
                val = np->ipv6only;
                break;
        case IPV6_RECVPKTINFO:
                val = np->rxopt.bits.rxinfo;
                break;
        case IPV6_2292PKTINFO:
                val = np->rxopt.bits.rxoinfo;
                break;
        case IPV6_RECVHOPLIMIT:
                val = np->rxopt.bits.rxhlim;
                break;
        case IPV6_2292HOPLIMIT:
                val = np->rxopt.bits.rxohlim;
                break;
        case IPV6_RECVRTHDR:
                val = np->rxopt.bits.srcrt;
                break;
        case IPV6_2292RTHDR:
                val = np->rxopt.bits.osrcrt;
                break;
        case IPV6_HOPOPTS:
        case IPV6_RTHDRDSTOPTS:
        case IPV6_RTHDR:
        case IPV6_DSTOPTS:
        {
                lock_sock(sk);
                len = ipv6_getsockopt_sticky(sk, np->opt, optname, optval, len);
                release_sock(sk);
                /* check if ipv6_getsockopt_sticky() returns err code */
                if (len < 0)
                        return len;
                return put_user(len, optlen);
        }
        case IPV6_RECVHOPOPTS:
                val = np->rxopt.bits.hopopts;
                break;
        case IPV6_2292HOPOPTS:
                val = np->rxopt.bits.ohopopts;
                break;
        case IPV6_RECVDSTOPTS:
                val = np->rxopt.bits.dstopts;
                break;
        case IPV6_2292DSTOPTS:
                val = np->rxopt.bits.odstopts;
                break;
        case IPV6_TCLASS:
                val = np->tclass;
                break;
        case IPV6_RECVTCLASS:
                val = np->rxopt.bits.rxtclass;
                break;
        case IPV6_FLOWINFO:
                val = np->rxopt.bits.rxflow;
                break;
        case IPV6_RECVPATHMTU:
                val = np->rxopt.bits.rxpmtu;
                break;
        case IPV6_PATHMTU:
        {
                struct dst_entry *dst;
                struct ip6_mtuinfo mtuinfo;

                if (len < sizeof(mtuinfo))
                        return -EINVAL;

                len = sizeof(mtuinfo);
                memset(&mtuinfo, 0, sizeof(mtuinfo));

                rcu_read_lock();
                dst = __sk_dst_get(sk);
                if (dst)
                        mtuinfo.ip6m_mtu = dst_mtu(dst);
                rcu_read_unlock();
                if (!mtuinfo.ip6m_mtu)
                        return -ENOTCONN;

                if (put_user(len, optlen))
                        return -EFAULT;
                if (copy_to_user(optval, &mtuinfo, len))
                        return -EFAULT;

                return 0;
        }
        case IPV6_TRANSPARENT:
                val = inet_sk(sk)->transparent;
                break;
        case IPV6_RECVORIGDSTADDR:
                val = np->rxopt.bits.rxorigdstaddr;
                break;
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        {
                struct dst_entry *dst;

                if (optname == IPV6_UNICAST_HOPS)
                        val = np->hop_limit;
                else
                        val = np->mcast_hops;

                if (val < 0) {
                        rcu_read_lock();
                        dst = __sk_dst_get(sk);
                        if (dst)
                                val = ip6_dst_hoplimit(dst);
                        rcu_read_unlock();
                }

                if (val < 0)
                        val = sock_net(sk)->ipv6.devconf_all->hop_limit;
                break;
        }
        case IPV6_MULTICAST_LOOP:
                val = np->mc_loop;
                break;
        case IPV6_MULTICAST_IF:
                val = np->mcast_oif;
                break;
        case IPV6_UNICAST_IF:
                val = (__force int)htonl((__u32) np->ucast_oif);
                break;
        case IPV6_MTU_DISCOVER:
                val = np->pmtudisc;
                break;
        case IPV6_RECVERR:
                val = np->recverr;
                break;
        case IPV6_FLOWINFO_SEND:
                val = np->sndflow;
                break;
        case IPV6_ADDR_PREFERENCES:
                val = 0;

                if (np->srcprefs & IPV6_PREFER_SRC_TMP)
                        val |= IPV6_PREFER_SRC_TMP;
                else if (np->srcprefs & IPV6_PREFER_SRC_PUBLIC)
                        val |= IPV6_PREFER_SRC_PUBLIC;
                else {
                        /* XXX: should we return system default? */
                        val |= IPV6_PREFER_SRC_PUBTMP_DEFAULT;
                }

                if (np->srcprefs & IPV6_PREFER_SRC_COA)
                        val |= IPV6_PREFER_SRC_COA;
                else
                        val |= IPV6_PREFER_SRC_HOME;
                break;
        case IPV6_MINHOPCOUNT:
                val = np->min_hopcount;
                break;
        case IPV6_DONTFRAG:
                val = np->dontfrag;
                break;
        default:
                return -ENOPROTOOPT;
        }
        len = min_t(unsigned int, sizeof(int), len);
        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}
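/* For context, a hypothetical userspace caller of the path above. The
 * IPPROTO_IPV6 level and IPV6_UNICAST_HOPS option are standard; the
 * function name and fd are illustrative only. An unset per-socket value
 * (-1) is resolved by the kernel via the cached route (ip6_dst_hoplimit())
 * or the per-namespace default before it is copied back, as the
 * IPV6_UNICAST_HOPS case shows.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int query_hop_limit(int fd)
{
        int val;
        socklen_t len = sizeof(val);

        if (getsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &val, &len) < 0)
                return -1;
        printf("hop limit: %d\n", val);
        return 0;
}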
/* tipc_udp_send_msg - enqueue a send request */
static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                             struct tipc_bearer *b,
                             struct tipc_media_addr *dest)
{
        int ttl, err = 0;
        struct udp_bearer *ub;
        struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
        struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
        struct rtable *rt;

        if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
                err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
                if (err)
                        goto tx_error;
        }

        skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
        ub = rcu_dereference_rtnl(b->media_ptr);
        if (!ub) {
                err = -ENODEV;
                goto tx_error;
        }
        if (dst->proto == htons(ETH_P_IP)) {
                struct flowi4 fl = {
                        .daddr = dst->ipv4.s_addr,
                        .saddr = src->ipv4.s_addr,
                        .flowi4_mark = skb->mark,
                        .flowi4_proto = IPPROTO_UDP
                };
                rt = ip_route_output_key(net, &fl);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        goto tx_error;
                }
                skb->dev = rt->dst.dev;
                ttl = ip4_dst_hoplimit(&rt->dst);
                udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
                                    dst->ipv4.s_addr, 0, ttl, 0, src->port,
                                    dst->port, false, true);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct dst_entry *ndst;
                struct flowi6 fl6 = {
                        .flowi6_oif = ub->ifindex,
                        .daddr = dst->ipv6,
                        .saddr = src->ipv6,
                        .flowi6_proto = IPPROTO_UDP
                };
                err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
                                                 &fl6);
                if (err)
                        goto tx_error;
                ttl = ip6_dst_hoplimit(ndst);
                err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
                                           ndst->dev, &src->ipv6,
                                           &dst->ipv6, 0, ttl, 0, src->port,
                                           dst->port, false);
#endif
        }
        return err;

tx_error:
        kfree_skb(skb);
        return err;
}

/* tipc_udp_recv - read data from bearer socket */
static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct udp_bearer *ub;
        struct tipc_bearer *b;

        ub = rcu_dereference_sk_user_data(sk);
        if (!ub) {
                pr_err_ratelimited("Failed to get UDP bearer reference");
                kfree_skb(skb);
                return 0;
        }

        skb_pull(skb, sizeof(struct udphdr));
        rcu_read_lock();
        b = rcu_dereference_rtnl(ub->bearer);

        if (b) {
                tipc_rcv(sock_net(sk), skb, b);
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();
        kfree_skb(skb);
        return 0;
}

static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
{
        int err = 0;
        struct ip_mreqn mreqn;
        struct sock *sk = ub->ubsock->sk;

        if (ntohs(remote->proto) == ETH_P_IP) {
                if (!ipv4_is_multicast(remote->ipv4.s_addr))
                        return 0;
                mreqn.imr_multiaddr = remote->ipv4;
                mreqn.imr_ifindex = ub->ifindex;
                err = ip_mc_join_group(sk, &mreqn);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                if (!ipv6_addr_is_multicast(&remote->ipv6))
                        return 0;
                err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
                                                   &remote->ipv6);
#endif
        }
        return err;
}
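/* The bearer above pairs ip4_dst_hoplimit() on the IPv4 leg with
 * ip6_dst_hoplimit() on the IPv6 leg. The IPv4 counterpart is the same
 * idea with a sysctl fallback; a sketch, assuming the include/net/route.h
 * form of this era:
 */
static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
        int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
        struct net *net = dev_net(dst->dev);

        if (hoplimit == 0)
                hoplimit = net->ipv4.sysctl_ip_default_ttl;
        return hoplimit;
}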
int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                    size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct icmp6hdr user_icmph;
        int addr_type;
        struct in6_addr *daddr;
        int iif = 0;
        struct flowi6 fl6;
        int err;
        int hlimit;
        struct dst_entry *dst;
        struct rt6_info *rt;
        struct pingfakehdr pfh;

        pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);

        err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph,
                                  sizeof(user_icmph));
        if (err)
                return err;

        if (msg->msg_name) {
                struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
                if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
                    u->sin6_family != AF_INET6) {
                        return -EINVAL;
                }
                if (sk->sk_bound_dev_if &&
                    sk->sk_bound_dev_if != u->sin6_scope_id) {
                        return -EINVAL;
                }
                daddr = &(u->sin6_addr);
                iif = u->sin6_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = &np->daddr;
        }

        if (!iif)
                iif = sk->sk_bound_dev_if;

        addr_type = ipv6_addr_type(daddr);
        if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
                return -EINVAL;
        if (addr_type & IPV6_ADDR_MAPPED)
                return -EINVAL;

        /* TODO: use ip6_datagram_send_ctl to get options from cmsg */

        memset(&fl6, 0, sizeof(fl6));

        fl6.flowi6_proto = IPPROTO_ICMPV6;
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        fl6.flowi6_uid = sock_i_uid(sk);
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;

        dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1);
        if (IS_ERR(dst))
                return PTR_ERR(dst);
        rt = (struct rt6_info *) dst;

        np = inet6_sk(sk);
        if (!np)
                return -EBADF;

        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;

        pfh.icmph.type = user_icmph.icmp6_type;
        pfh.icmph.code = user_icmph.icmp6_code;
        pfh.icmph.checksum = 0;
        pfh.icmph.un.echo.id = inet->inet_sport;
        pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
        pfh.iov = msg->msg_iov;
        pfh.wcheck = 0;
        pfh.family = AF_INET6;

        if (ipv6_addr_is_multicast(&fl6.daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        lock_sock(sk);
        err = ip6_append_data(sk, ping_getfrag, &pfh, len,
                              0, hlimit, np->tclass, NULL, &fl6, rt,
                              MSG_DONTWAIT, np->dontfrag);

        if (err) {
                ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
                                   ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
                err = icmpv6_push_pending_frames(sk, &fl6,
                                                 (struct icmp6hdr *) &pfh.icmph,
                                                 len);
        }
        release_sock(sk);

        if (err)
                return err;

        return len;
}
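/* The multicast-vs-unicast hop limit selection above (np->mcast_hops vs
 * np->hop_limit, with ip6_dst_hoplimit() as the fallback) was later
 * consolidated into a helper; a sketch of ip6_sk_dst_hoplimit() as it
 * appears in include/net/ipv6.h:
 */
static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np,
                                      struct flowi6 *fl6,
                                      struct dst_entry *dst)
{
        int hlimit;

        if (ipv6_addr_is_multicast(&fl6->daddr))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
        return hlimit;
}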
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note : socket lock is not held for SYNACK packets, but might be modified
 * by calls to skb_set_owner_w() and ipv6_local_error(),
 * which are using proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass)
{
        struct net *net = sock_net(sk);
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        u32 mtu;

        if (opt) {
                unsigned int head_room;

                /* First: exthdrs may take lots of space (~8K for now)
                   MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (!skb2) {
                                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        consume_skb(skb);
                        skb = skb2;
                        /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
                         * it is safe to call in our context (socket lock not held)
                         */
                        skb_set_owner_w(skb, (struct sock *)sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);

        /*
         *      Fill in the IPv6 header
         */
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
                                                     np->autoflowlabel, fl6));

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        hdr->saddr = fl6->saddr;
        hdr->daddr = *first_hop;

        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_OUT, skb->len);
                /* hooks should never assume socket lock is held.
                 * we promote our socket to non const
                 */
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
                               net, (struct sock *)sk, skb, NULL, dst->dev,
                               dst_output);
        }

        skb->dev = dst->dev;
        /* ipv6_local_error() does not require socket lock,
         * we promote our socket to non const
         */
        ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
             struct ipv6_txoptions *opt)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl->fl6_dst;
        struct dst_entry *dst = skb->dst;
        struct ipv6hdr *hdr;
        u8  proto = fl->proto;
        int seg_len = skb->len;
        int hlimit, tclass;
        u32 mtu;

        if (opt) {
                unsigned int head_room;

                /* First: exthdrs may take lots of space (~8K for now)
                   MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (skb2 == NULL) {
                                IP6_INC_STATS(ip6_dst_idev(skb->dst),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        kfree_skb(skb);
                        skb = skb2;
                        skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

        /*
         *      Fill in the IPv6 header
         */

        hlimit = -1;
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        tclass = -1;
        if (np)
                tclass = np->tclass;
        if (tclass < 0)
                tclass = 0;

        *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
        ipv6_addr_copy(&hdr->daddr, first_hop);

        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;

        mtu = ip6_skb_dst_mtu(skb);
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTREQUESTS);
#if defined(CONFIG_RA_HW_NAT_IPV6)
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
                FOE_MAGIC_TAG(skb) = 0;
                FOE_AI_UNHIT(skb);
#endif
#endif
                return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
                               dst_output);
        }

        skb->dev = dst->dev;
        ipv6_local_error(sk, EMSGSIZE, fl, mtu);
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}
/* Send RST reply */
static void send_reset(struct net *net, struct sk_buff *oldskb)
{
        struct sk_buff *nskb;
        struct tcphdr otcph, *tcph;
        unsigned int otcplen, hh_len;
        int tcphoff, needs_ack;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
        struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE       0x0U
        const __u8 tclass = DEFAULT_TOS_VALUE;
        struct dst_entry *dst = NULL;
        u8 proto;
        __be16 frag_off;
        struct flowi6 fl6;

        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
                pr_debug("addr is not unicast.\n");
                return;
        }

        proto = oip6h->nexthdr;
        tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data),
                                   &proto, &frag_off);

        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
                pr_debug("Cannot get TCP header.\n");
                return;
        }

        otcplen = oldskb->len - tcphoff;

        /* IP header checks: fragment, too short. */
        if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
                pr_debug("proto(%d) != IPPROTO_TCP, or too short. otcplen = %d\n",
                         proto, otcplen);
                return;
        }

        if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
                BUG();

        /* No RST for RST. */
        if (otcph.rst) {
                pr_debug("RST is set\n");
                return;
        }

        /* Check checksum. */
        if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
                            skb_checksum(oldskb, tcphoff, otcplen, 0))) {
                pr_debug("TCP checksum is invalid\n");
                return;
        }

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.saddr = oip6h->daddr;
        fl6.daddr = oip6h->saddr;
        fl6.fl6_sport = otcph.dest;
        fl6.fl6_dport = otcph.source;
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst == NULL || dst->error) {
                dst_release(dst);
                return;
        }
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        if (IS_ERR(dst))
                return;

        hh_len = (dst->dev->hard_header_len + 15)&~15;
        nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
                         + sizeof(struct tcphdr) + dst->trailer_len,
                         GFP_ATOMIC);

        if (!nskb) {
                net_dbg_ratelimited("cannot alloc skb\n");
                dst_release(dst);
                return;
        }

        skb_dst_set(nskb, dst);

        skb_reserve(nskb, hh_len + dst->header_len);

        skb_put(nskb, sizeof(struct ipv6hdr));
        skb_reset_network_header(nskb);
        ip6h = ipv6_hdr(nskb);
        ip6_flow_hdr(ip6h, tclass, 0);
        ip6h->hop_limit = ip6_dst_hoplimit(dst);
        ip6h->nexthdr = IPPROTO_TCP;
        ip6h->saddr = oip6h->daddr;
        ip6h->daddr = oip6h->saddr;

        skb_reset_transport_header(nskb);
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
        /* Truncate to length (no data) */
        tcph->doff = sizeof(struct tcphdr)/4;
        tcph->source = otcph.dest;
        tcph->dest = otcph.source;

        if (otcph.ack) {
                needs_ack = 0;
                tcph->seq = otcph.ack_seq;
                tcph->ack_seq = 0;
        } else {
                needs_ack = 1;
                tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
                                      + otcplen - (otcph.doff<<2));
                tcph->seq = 0;
        }

        /* Reset flags */
        ((u_int8_t *)tcph)[13] = 0;
        tcph->rst = 1;
        tcph->ack = needs_ack;

        tcph->window = 0;
        tcph->urg_ptr = 0;
        tcph->check = 0;

        /* Adjust TCP checksum */
        tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
                                      &ipv6_hdr(nskb)->daddr,
                                      sizeof(struct tcphdr), IPPROTO_TCP,
                                      csum_partial(tcph,
                                                   sizeof(struct tcphdr), 0));

        nf_ct_attach(nskb, oldskb);

        ip6_local_out(nskb);
}
static void tarpit_tcp6(struct net *net, struct sk_buff *oldskb,
                        unsigned int hook, unsigned int mode)
{
        struct sk_buff *nskb;
        struct tcphdr *tcph, oth;
        unsigned int otcplen;
        int tcphoff;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
        struct ipv6hdr *ip6h;
        const uint8_t tclass = 0;
        uint8_t proto;
        uint16_t payload;
        __be16 frag_off;

        proto = oip6h->nexthdr;
        tcphoff = ipv6_skip_exthdr(oldskb,
                                   (uint8_t *)(oip6h + 1) - oldskb->data,
                                   &proto, &frag_off);

        if (tcphoff < 0 || tcphoff > oldskb->len) {
                pr_debug("Cannot get TCP header.\n");
                return;
        }

        otcplen = oldskb->len - tcphoff;

        /* IP header checks: fragment, too short. */
        if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
                pr_debug("proto(%d) != IPPROTO_TCP, or too short. otcplen = %d\n",
                         proto, otcplen);
                return;
        }

        if (skb_copy_bits(oldskb, tcphoff, &oth, sizeof(struct tcphdr))) {
                WARN_ON(1);
                return;
        }

        /* Check checksum. */
        if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
                            skb_checksum(oldskb, tcphoff, otcplen, 0))) {
                pr_debug("TCP checksum is invalid\n");
                return;
        }

        nskb = skb_copy_expand(oldskb, LL_MAX_HEADER,
                               skb_tailroom(oldskb), GFP_ATOMIC);
        if (nskb == NULL) {
                if (net_ratelimit())
                        pr_debug("cannot alloc skb\n");
                return;
        }

        /* This packet will not be the same as the other: clear nf fields */
        nf_reset(nskb);
        skb_nfmark(nskb) = 0;
        skb_init_secmark(nskb);
        skb_shinfo(nskb)->gso_size = 0;
        skb_shinfo(nskb)->gso_segs = 0;
        skb_shinfo(nskb)->gso_type = 0;

        skb_put(nskb, sizeof(struct ipv6hdr));
        ip6h = ipv6_hdr(nskb);
        *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
        ip6h->nexthdr = IPPROTO_TCP;
        ip6h->saddr = oip6h->daddr;
        ip6h->daddr = oip6h->saddr;

        /* Adjust IP TTL */
        if (mode == XTTARPIT_HONEYPOT)
                ip6h->hop_limit = 128;
        else
                ip6h->hop_limit = ip6_dst_hoplimit(skb_dst(nskb));

        tcph = (struct tcphdr *)(skb_network_header(nskb) +
               sizeof(struct ipv6hdr));

        /* Truncate to length (no data) */
        skb_trim(nskb, sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
        tcph->doff    = sizeof(struct tcphdr)/4;
        tcph->source  = oth.dest;
        tcph->dest    = oth.source;
        tcph->urg_ptr = 0;
        /* Reset flags */
        ((uint8_t *)tcph)[13] = 0;

        payload = nskb->len - sizeof(struct ipv6hdr) - sizeof(struct tcphdr);
        if (!tarpit_generic(&oth, tcph, payload, mode))
                goto free_nskb;

        ip6h->payload_len = htons(sizeof(struct tcphdr));
        tcph->check = 0;

        /* Adjust TCP checksum */
        tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
                                      &ipv6_hdr(nskb)->daddr,
                                      sizeof(struct tcphdr), IPPROTO_TCP,
                                      csum_partial(tcph,
                                                   sizeof(struct tcphdr), 0));

        if (ip6_route_me_harder(net, nskb))
                goto free_nskb;

        nskb->ip_summed = CHECKSUM_NONE;

        nf_ct_attach(nskb, oldskb);

        NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, nskb->sk, nskb, NULL,
                skb_dst(nskb)->dev, dst_output);
        return;

 free_nskb:
        kfree_skb(nskb);
}
static unsigned int echo_tg6(struct sk_buff *oldskb,
                             const struct xt_action_param *par)
{
        const struct udphdr *oldudp;
        const struct ipv6hdr *oldip;
        struct udphdr *newudp, oldudp_buf;
        struct ipv6hdr *newip;
        struct sk_buff *newskb;
        unsigned int data_len;
        void *payload;
        struct flowi6 fl;
        struct dst_entry *dst = NULL;
        struct net *net = dev_net((par->in != NULL) ? par->in : par->out);

        /* This allows us to do the copy operation in fewer lines of code. */
        if (skb_linearize(oldskb) < 0)
                return NF_DROP;

        oldip  = ipv6_hdr(oldskb);
        oldudp = skb_header_pointer(oldskb, par->thoff,
                 sizeof(*oldudp), &oldudp_buf);
        if (oldudp == NULL)
                return NF_DROP;
        if (ntohs(oldudp->len) <= sizeof(*oldudp))
                return NF_DROP;

        newskb = alloc_skb(LL_MAX_HEADER + sizeof(*newip) +
                 ntohs(oldudp->len), GFP_ATOMIC);
        if (newskb == NULL)
                return NF_DROP;

        skb_reserve(newskb, LL_MAX_HEADER);
        newskb->protocol = oldskb->protocol;

        skb_reset_network_header(newskb);
        newip = (void *)skb_put(newskb, sizeof(*newip));
        newip->version  = oldip->version;
        newip->priority = oldip->priority;
        memcpy(newip->flow_lbl, oldip->flow_lbl, sizeof(newip->flow_lbl));
        newip->nexthdr  = par->target->proto;
        newip->saddr    = oldip->daddr;
        newip->daddr    = oldip->saddr;

        skb_reset_transport_header(newskb);
        newudp = (void *)skb_put(newskb, sizeof(*newudp));
        newudp->source = oldudp->dest;
        newudp->dest   = oldudp->source;
        newudp->len    = oldudp->len;

        data_len = ntohs(oldudp->len) - sizeof(*oldudp);
        payload  = skb_header_pointer(oldskb, par->thoff +
                   sizeof(*oldudp), data_len, NULL);
        memcpy(skb_put(newskb, data_len), payload, data_len);

#if 0
        /*
         * Since no fields are modified (we just swapped things around),
         * this works too in our specific echo case.
         */
        newudp->check = oldudp->check;
#else
        newudp->check = 0;
        newudp->check = csum_ipv6_magic(&newip->saddr, &newip->daddr,
                        ntohs(newudp->len), IPPROTO_UDP,
                        csum_partial(newudp, ntohs(newudp->len), 0));
#endif

        memset(&fl, 0, sizeof(fl));
        fl.flowi6_proto = newip->nexthdr;
        memcpy(&fl.saddr, &newip->saddr, sizeof(fl.saddr));
        memcpy(&fl.daddr, &newip->daddr, sizeof(fl.daddr));
        fl.fl6_sport = newudp->source;
        fl.fl6_dport = newudp->dest;
        security_skb_classify_flow((struct sk_buff *)oldskb,
                                   flowi6_to_flowi(&fl));
        dst = ip6_route_output(net, NULL, &fl);
        if (dst == NULL || dst->error != 0) {
                dst_release(dst);
                goto free_nskb;
        }

        skb_dst_set(newskb, dst);
        newip->hop_limit = ip6_dst_hoplimit(skb_dst(newskb));
        newskb->ip_summed = CHECKSUM_NONE;

        /* "Never happens" (?) */
        if (newskb->len > dst_mtu(skb_dst(newskb)))
                goto free_nskb;

        nf_ct_attach(newskb, oldskb);
        ip6_local_out(newskb);
        return NF_DROP;

 free_nskb:
        kfree_skb(newskb);
        return NF_DROP;
}
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
        struct sk_buff *nskb;
        struct tcphdr _otcph;
        const struct tcphdr *otcph;
        unsigned int otcplen, hh_len;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
        struct ipv6hdr *ip6h;
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;

        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
                pr_debug("addr is not unicast.\n");
                return;
        }

        otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
        if (!otcph)
                return;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.saddr = oip6h->daddr;
        fl6.daddr = oip6h->saddr;
        fl6.fl6_sport = otcph->dest;
        fl6.fl6_dport = otcph->source;
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst == NULL || dst->error) {
                dst_release(dst);
                return;
        }
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        if (IS_ERR(dst))
                return;

        hh_len = (dst->dev->hard_header_len + 15)&~15;
        nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
                         + sizeof(struct tcphdr) + dst->trailer_len,
                         GFP_ATOMIC);

        if (!nskb) {
                net_dbg_ratelimited("cannot alloc skb\n");
                dst_release(dst);
                return;
        }

        skb_dst_set(nskb, dst);

        skb_reserve(nskb, hh_len + dst->header_len);
        ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
                                    ip6_dst_hoplimit(dst));
        nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);

        nf_ct_attach(nskb, oldskb);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        /* If we use ip6_local_out for bridged traffic, the MAC source on
         * the RST will be ours, instead of the destination's. This confuses
         * some routers/firewalls, and they drop the packet. So we need to
         * build the eth header using the original destination's MAC as the
         * source, and send the RST packet directly.
         */
        if (oldskb->nf_bridge) {
                struct ethhdr *oeth = eth_hdr(oldskb);

                nskb->dev = nf_bridge_get_physindev(oldskb);
                nskb->protocol = htons(ETH_P_IPV6);
                ip6h->payload_len = htons(sizeof(struct tcphdr));
                if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
                                    oeth->h_source, oeth->h_dest,
                                    nskb->len) < 0)
                        return;
                dev_queue_xmit(nskb);
        } else
#endif
                ip6_local_out(net, nskb->sk, nskb);
}
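/* nf_send_reset6() delegates header construction to the nf_reject_ipv6
 * helpers; compare the fully open-coded send_reset() earlier in this
 * collection. A sketch of nf_reject_ip6hdr_put(), assuming the
 * net/ipv6/netfilter/nf_reject_ipv6.c form: it writes the mirrored IPv6
 * header using the hop limit the caller obtained from ip6_dst_hoplimit().
 */
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
                                     const struct sk_buff *oldskb,
                                     __u8 protocol, int hoplimit)
{
        struct ipv6hdr *ip6h;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
#define DEFAULT_TOS_VALUE       0x0U
        const __u8 tclass = DEFAULT_TOS_VALUE;

        skb_put(nskb, sizeof(struct ipv6hdr));
        skb_reset_network_header(nskb);
        ip6h = ipv6_hdr(nskb);
        ip6_flow_hdr(ip6h, tclass, 0);
        ip6h->hop_limit = hoplimit;
        ip6h->nexthdr = protocol;
        ip6h->saddr = oip6h->daddr;
        ip6h->daddr = oip6h->saddr;

        nskb->protocol = htons(ETH_P_IPV6);

        return ip6h;
}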