/*
 * Transmit a locally generated IPv4 packet, dispatching to the right
 * output path for the build target.
 *
 * Kernel builds on >= 2.6.25 can call ip_local_out() directly; older
 * kernels must finalize tot_len and the header checksum by hand and
 * traverse the LOCAL_OUT netfilter hook themselves.  Userspace
 * (OS_USER) builds only recompute the header checksum and hand the
 * frame to dev_queue_xmit().
 *
 * Returns the result of the underlying transmit path; failures (< 0)
 * are logged here.
 */
static inline int serval_ip_local_out(struct sk_buff *skb)
{
	int err;
#if defined(OS_LINUX_KERNEL)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
	err = ip_local_out(skb);
#else
	struct iphdr *iph = ip_hdr(skb);

	/* Pre-2.6.25: no ip_local_out(), finalize the header manually
	 * before the LOCAL_OUT hook. */
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
#endif
#else /* OS_USER */
	/* Calculate checksum */
	ip_send_check(ip_hdr(skb));

	err = dev_queue_xmit(skb);
#endif
	if (err < 0) {
		LOG_ERR("packet_xmit failed err=%d\n", err);
	}
	return err;
}
/*
 * Build (or refresh) the IPv4 header for a DSR packet in dp->ip_data.
 *
 * If the packet carries an skb with a valid network header, the first
 * ip_len bytes of that header are copied in; otherwise a fresh header
 * is synthesized from the given source/destination addresses and TTL.
 * In both cases tot_len, protocol and the header checksum are (re)set
 * afterwards, since they may differ from any copied header.
 *
 * Returns a pointer to the header inside dp->ip_data (also stored in
 * dp->nh.iph).
 */
struct iphdr *dsr_build_ip(struct dsr_pkt *dp, struct in_addr src,
			   struct in_addr dst, int ip_len, int tot_len,
			   int protocol, int ttl)
{
	struct iphdr *iph;

	dp->nh.iph = iph = (struct iphdr *)dp->ip_data;

	if (dp->skb && SKB_NETWORK_HDR_RAW(dp->skb)) {
		/* Reuse the header already attached to the skb. */
		memcpy(dp->ip_data, SKB_NETWORK_HDR_RAW(dp->skb), ip_len);
	} else {
		iph->version = IPVERSION;
		iph->ihl = 5;
		iph->tos = 0;
		iph->id = 0;
		iph->frag_off = 0;
		iph->ttl = (ttl ? ttl : IPDEFTTL);
		iph->saddr = src.s_addr;
		iph->daddr = dst.s_addr;
	}

	/* Length/protocol may differ from a copied header; always refresh
	 * them and the checksum last. */
	iph->tot_len = htons(tot_len);
	iph->protocol = protocol;

	ip_send_check(iph);

	return iph;
}
/*
 * Prepare an outer IPv4 header on @skb for tunnel transmission.
 *
 * Scrubs packet state for the (possibly cross-netns, @xnet) transmit,
 * clears the flow hash, attaches a clone of @dst, resets the IP control
 * block, then pushes a fresh options-less 20-byte IPv4 header and fills
 * it from the supplied addressing/QoS parameters.  An IP ID is selected
 * per GSO segment count; tot_len and the header checksum are written
 * last, after the header is otherwise complete.
 */
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;	/* 5 words, no options */
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}
/*
 * Finish transport-mode input processing of an IPsec-decapsulated IPv4
 * packet: restore the inner protocol number, rebuild tot_len and the
 * header checksum for the now-shorter packet, then re-inject it.
 *
 * GRO-accelerated packets (XFRM_GRO set in the offload state) skip the
 * netfilter re-injection and only rebuild the MAC header; all others
 * re-enter the stack at the PRE_ROUTING hook towards
 * xfrm4_rcv_encap_finish().
 *
 * Without CONFIG_NETFILTER there is no hook traversal to resume, so the
 * synchronous (!async) path returns -protocol to its caller instead.
 */
int xfrm4_transport_finish(struct sk_buff *skb, int async)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct iphdr *iph = ip_hdr(skb);

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	if (!async)
		return -iph->protocol;
#endif

	/* Move data back to include the network header. */
	__skb_push(skb, skb->data - skb_network_header(skb));
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	if (xo && (xo->flags & XFRM_GRO)) {
		skb_mac_header_rebuild(skb);
		return 0;
	}

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		xfrm4_rcv_encap_finish);
	return 0;
}
/*
 * AH (transport mode) output: compute the authentication ICV over the
 * packet with the mutable IPv4 header fields zeroed, then restore them.
 *
 * Mutable fields (tos, ttl, frag_off, plus daddr and any mutable IP
 * options, which source routing may rewrite) are saved into tmp_iph,
 * cleared for the ICV calculation as RFC 2402 requires, and copied back
 * afterwards.  The 60-byte union buffer covers a maximal IPv4 header
 * (15 words) including options.
 *
 * Returns 0 on success, or a negative error from option parsing.
 */
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	union {
		struct iphdr iph;
		char buf[60];
	} tmp_iph;

	top_iph = skb->nh.iph;
	iph = &tmp_iph.iph;

	/* Save the mutable fields before zeroing them for the ICV. */
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		/* Header carries options: save daddr + option bytes, then
		 * zero any mutable options in place. */
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto error;
	}

	ah = (struct ip_auth_hdr *)((char *)top_iph+top_iph->ihl*4);
	ah->nexthdr = top_iph->protocol;

	/* Zero/normalize mutable fields for the ICV computation. */
	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->protocol = IPPROTO_AH;
	top_iph->check = 0;

	ahp = x->data;
	/* AH length field is in 32-bit words minus 2 (RFC 2402). */
	ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
				  ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(++x->replay.oseq);
	ahp->icv(ahp, skb, ah->auth_data);

	/* Restore the saved mutable fields. */
	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	ip_send_check(top_iph);

	err = 0;

error:
	return err;
}
/*
 * Bypass transmitter
 * Let packets bypass the destination when the destination is not
 * available, it may be only used in transparent cache cluster.
 *
 * The packet is routed towards its original destination without any
 * IPVS rewriting, then handed to ip_vs_send_or_cont().  The skb is
 * always consumed (sent or freed), so NF_STOLEN is returned on both
 * paths.
 */
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct iphdr *iph = ip_hdr(skb);

	EnterFunction(10);

	rcu_read_lock();
	if (__ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
			       NULL) < 0)
		goto tx_error;

	/* Refresh the header checksum (may be stale after defrag). */
	ip_send_check(iph);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
 * or the bridge port (NF_BRIDGE PREROUTING).
 *
 * Builds a minimal IPv4+TCP reset answering @oldskb and forwards it out
 * through the bridge port @dev.  Silently does nothing if the original
 * packet's IP/TCP headers fail validation or allocation fails.
 */
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);

	/* Finalize the new IP header now that the TCP part is in place. */
	niph->ttl = net->ipv4.sysctl_ip_default_ttl;
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
// 1. Redirect packets from client to the service provider static unsigned int hook_func_in(unsigned int hooknum, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { struct iphdr *iph = NULL; struct tcphdr *tcph=NULL; int tcplen; if (!in) return NF_ACCEPT; iph = ip_hdr(skb); if (unlikely(iph == NULL)) return NF_ACCEPT; /* Packets destinated to the mbox will be redirected to the service provider*/ if(iph->daddr == middlebox_networkip) { if(iph->protocol == IPPROTO_TCP) { // obtains the tcp header tcph = (struct tcphdr *)((__u32 *)iph + iph->ihl); // irrelevant packets // 9877 is the port listened by the service provider if (ntohs(tcph->dest) != 9877) return NF_ACCEPT; // redirect packet to the service provider iph->daddr = redirect_networkip; /* // Here we can add additional data into the packet to serve as network capabilities // 1. Additional data should be appended into the payload. We cannot add these data in front of the skb_buff since that the front space stores the packet headers, which are crutial for TCP/IP // 2. We need to check the tailroom before appending new data into the skb_buff. // 3. A module at the service provider needs to strip the added data to deliever original data context // Check whether the skb_buff has at least 40 bytes and make sure the skb_buff has not been paged. 
// The pagement of the skb_buff can be avoided once we disable the TSO offloading if (skb_tailroom(skb) >= 40 && skb->data_len == 0) { secure = network_capabilities // append the capabilities to the data payload secure = skb_put(skb, 40); } */ // recompute tcp checksum // This is very important, otherwise the packet will be dropped due to checksum error tcplen = skb->len - ip_hdrlen(skb); tcph->check = 0; tcph->check = tcp_v4_check(tcplen, iph->saddr, iph->daddr, csum_partial(tcph, tcplen, 0)); // recompute the IP checksum skb->ip_summed = CHECKSUM_NONE; ip_send_check(iph); //printk(KERN_INFO "Packet length after PRE_ROUTING: %u\n", ntohs(iph->tot_len)); } } return NF_ACCEPT; }
/*
 * xt_TEE target (IPv4): clone the packet and route the clone to the
 * configured gateway, while the original continues on its way
 * (XT_CONTINUE is returned for it in all cases).
 *
 * The clone is detached from conntrack, gets IP_DF set, its TTL
 * decremented on the input-side hooks, and its header checksum rebuilt
 * before being re-routed and transmitted.  The per-CPU tee_active flag
 * keeps the clone from re-entering this target and looping.
 */
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	/* Already transmitting a TEE clone on this CPU: don't clone again. */
	if (percpu_read(tee_active))
		return XT_CONTINUE;

	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting the clone towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif

	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	/* Mitigate TEE loops between two hosts on input hooks. */
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		percpu_write(tee_active, true);
		ip_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
/*
 * Build and transmit an IGMP report/leave message for @group on @dev.
 *
 * Routes the message (LEAVE goes to the all-routers group per IGMPv2),
 * allocates an skb with room for the link-layer header, and hand-builds
 * an IPv4 header carrying the Router Alert option (hence the header is
 * 24 bytes / ihl 6) followed by the IGMP header with its own checksum.
 *
 * Returns the route's output() result, or -1 on route/alloc failure.
 */
static int igmp_send_report(struct device *dev, u32 group, int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	u32 dst;

	/* According to IGMPv2 specs, LEAVE messages are
	 * sent to all-routers group.
	 */
	dst = group;
	if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;

	if (ip_route_output(&rt, dst, 0, 0, dev->ifindex))
		return -1;
	if (rt->rt_src == 0) {
		ip_rt_put(rt);
		return -1;
	}

	skb=alloc_skb(IGMP_SIZE+dev->hard_header_len+15, GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}

	skb->dst = &rt->u.dst;

	/* Leave 16-byte-aligned room for the link-layer header. */
	skb_reserve(skb, (dev->hard_header_len+15)&~15);

	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);

	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)+4)>>2;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->ttl = 1;
	iph->daddr = dst;
	iph->saddr = rt->rt_src;
	iph->protocol = IPPROTO_IGMP;
	iph->tot_len = htons(IGMP_SIZE);
	iph->id = htons(ip_id_count++);
	/* Router Alert option (RFC 2113): type, length 4, value 0. */
	((u8*)&iph[1])[0] = IPOPT_RA;
	((u8*)&iph[1])[1] = 4;
	((u8*)&iph[1])[2] = 0;
	((u8*)&iph[1])[3] = 0;
	ip_send_check(iph);

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type=type;
	ih->code=0;
	ih->csum=0;
	ih->group=group;
	ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return skb->dst->output(skb);
}
/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 *
 *      LVS-DR: the packet is forwarded to the real server without
 *      rewriting the IP header — only the route/output device changes.
 *      The skb is always consumed here (sent, delivered locally, or
 *      freed), so NF_STOLEN is returned on every path.
 */
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct iphdr *iph = ip_hdr(skb);
	int mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(iph->tos),
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL)))
		goto tx_error_icmp;
	if (rt->rt_flags & RTCF_LOCAL) {
		/* Destination is local to this host: deliver up the stack. */
		ip_rt_put(rt);
		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
	    !skb_is_gso(skb)) {
		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
/*
 * Recompute IPv4-layer checksums on @skb as directed by @update_flags.
 *
 * Dispatches to the per-protocol checksum fixers (ICMP/IGMP/TCP/UDP/
 * UDPLITE) for non-fragment packets, then optionally rebuilds the IP
 * header checksum itself (TCA_CSUM_UPDATE_FLAG_IPV4HDR), making the
 * header writable first if the skb is a shared clone.
 *
 * Returns 1 on success, 0 on failure (packet too short, or it could not
 * be made writable).
 */
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Only the first fragment carries the transport header. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		/* BUGFIX: pskb_expand_head() above (and the per-protocol
		 * helpers) may reallocate the skb head, leaving the cached
		 * iph pointer stale.  Re-read the header pointer from the
		 * skb instead of using iph. */
		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
static inline struct sk_buff * ip_vs_gather_frags(struct sk_buff *skb) { skb = ip_defrag(skb); if (skb) ip_send_check(skb->nh.iph); return skb; }
/*
 * Reassemble IPv4 fragments under defrag user @user and, once a full
 * datagram is available, recompute its header checksum.  Returns NULL
 * while more fragments are needed (the fragment was consumed).
 */
static inline struct sk_buff *
ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	struct sk_buff *reasm = ip_defrag(skb, user);

	if (reasm != NULL)
		ip_send_check(ip_hdr(reasm));

	return reasm;
}
/* When forwarding a packet, we must ensure that we've got enough headroom
 * for the encapsulation packet in the skb.  This also gives us an
 * opportunity to figure out what the payload_len, dsfield, ttl, and df
 * values should be, so that we won't need to look at the old ip header
 * again.
 *
 * Returns the (possibly reallocated) skb on success, or
 * ERR_PTR(-ENOMEM) after freeing the original on allocation failure.
 */
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
			   unsigned int max_headroom, __u8 *next_protocol,
			   __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
			   __be16 *df)
{
	struct sk_buff *new_skb = NULL;
	struct iphdr *old_iph = NULL;
#ifdef CONFIG_IP_VS_IPV6
	struct ipv6hdr *old_ipv6h = NULL;
#endif

	ip_vs_drop_early_demux_sk(skb);

	/* Reallocate headroom when the skb is too small or shared. */
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto error;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

#ifdef CONFIG_IP_VS_IPV6
	if (skb_af == AF_INET6) {
		old_ipv6h = ipv6_hdr(skb);
		*next_protocol = IPPROTO_IPV6;
		if (payload_len)
			*payload_len =
				ntohs(old_ipv6h->payload_len) +
				sizeof(*old_ipv6h);
		*dsfield = ipv6_get_dsfield(old_ipv6h);
		*ttl = old_ipv6h->hop_limit;
		if (df)
			*df = 0;
	} else
#endif
	{
		old_iph = ip_hdr(skb);
		/* Copy DF, reset fragment offset and MF */
		if (df)
			*df = (old_iph->frag_off & htons(IP_DF));
		*next_protocol = IPPROTO_IPIP;

		/* fix old IP header checksum */
		ip_send_check(old_iph);

		*dsfield = ipv4_get_dsfield(old_iph);
		*ttl = old_iph->ttl;
		if (payload_len)
			*payload_len = ntohs(old_iph->tot_len);
	}

	return skb;

error:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
/*
 * Compatibility shim for ip_local_out(): finalize the total length and
 * header checksum of the packet in *pskb, then send it through the
 * LOCAL_OUT netfilter hook towards dst_output().
 */
static int __xtnu_ip_local_out(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct iphdr *hdr = ip_hdr(skb);

	hdr->tot_len = htons(skb->len);
	ip_send_check(hdr);

	return nf_hook(PF_INET, NF_IP_LOCAL_OUT, pskb, NULL,
		       skb->dst->dev, dst_output);
}
/*
 * Finalize the IPv4 header of a locally generated packet (total length
 * and header checksum), then pass it through the LOCAL_OUT netfilter
 * hook on its way to dst_output().
 */
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *hdr = ip_hdr(skb);

	hdr->tot_len = htons(skb->len);
	ip_send_check(hdr);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
/*
 * IPIP xfrm output finalization: fix up the outer header's total length
 * and checksum after encapsulation.  Always succeeds.
 */
static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *outer = skb->nh.iph;

	outer->tot_len = htons(skb->len);
	ip_send_check(outer);

	return 0;
}
/*
 * Packet has been made sufficiently writable in caller
 * - inout: 1=in->out, 0=out->in
 *
 * NAT an ICMP error packet for IPVS: rewrites the outer IP header and
 * the cited (embedded) IP header inside the ICMP payload, plus the
 * embedded transport port, then recomputes both IP header checksums and
 * the ICMP checksum.
 */
void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
		    struct ip_vs_conn *cp, int inout)
{
	struct iphdr *iph = ip_hdr(skb);
	unsigned int icmp_offset = iph->ihl*4;
	struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
						   icmp_offset);
	/* The IP header cited inside the ICMP error payload. */
	struct iphdr *ciph = (struct iphdr *)(icmph + 1);

	if (inout) {
		/* Outgoing: present the virtual address to the client. */
		iph->saddr = cp->vaddr;
		ip_send_check(iph);
		ciph->daddr = cp->vaddr;
		ip_send_check(ciph);
	} else {
		/* Incoming: steer to the chosen real server. */
		iph->daddr = cp->daddr;
		ip_send_check(iph);
		ciph->saddr = cp->daddr;
		ip_send_check(ciph);
	}

	/* the TCP/UDP port */
	if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) {
		__be16 *ports = (void *)ciph + ciph->ihl*4;

		if (inout)
			ports[1] = cp->vport;
		else
			ports[0] = cp->dport;
	}

	/* And finally the ICMP checksum */
	icmph->checksum = 0;
	icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (inout)
		IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
			      "Forwarding altered outgoing ICMP");
	else
		IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
			      "Forwarding altered incoming ICMP");
}
/*
 * Rewrite the source address of an outgoing skb (and its owning socket)
 * to @dev's address after a dynamic address change (sysctl_ip_dynaddr).
 * Updates the IP header checksum and rehashes the socket when its bound
 * address changes.
 *
 * Returns 1 when a rewrite was performed, 0 otherwise.
 */
int ip_rewrite_addrs (struct sock *sk, struct sk_buff *skb, struct device *dev)
{
	u32 new_saddr = dev->pa_addr;
	struct iphdr *iph;

	/*
	 *	Be careful: new_saddr must be !0
	 */
	if (!new_saddr) {
		printk(KERN_WARNING "ip_rewrite_addrs(): NULL device \"%s\" addr\n",
		       dev->name);
		return 0;
	}

	/*
	 *	Ouch!, this should not happen.
	 */
	if (!sk->saddr || !sk->rcv_saddr) {
		printk(KERN_WARNING "ip_rewrite_addrs(): not valid sock addrs: saddr=%08lX rcv_saddr=%08lX",
		       ntohl(sk->saddr), ntohl(sk->rcv_saddr));
		return 0;
	}

	/*
	 *	Be verbose if sysctl value & 2
	 */
	if (sysctl_ip_dynaddr & 2) {
		printk(KERN_INFO "ip_rewrite_addrs(): shifting saddr from %s",
		       in_ntoa(skb->saddr));
		printk(" to %s (state %d)\n", in_ntoa(new_saddr), sk->state);
	}

	iph = skb->ip_hdr;

	if (new_saddr != iph->saddr) {
		iph->saddr = new_saddr;
		skb->saddr = new_saddr;
		/* Source changed, so the header checksum must be redone. */
		ip_send_check(iph);
	} else if (sysctl_ip_dynaddr & 2) {
		printk(KERN_WARNING "ip_rewrite_addrs(): skb already changed (???).\n");
		return 0;
	}

	/*
	 *	Maybe we are in an skb chain loop and the socket address
	 *	has not yet been 'damaged'.
	 */
	if (new_saddr != sk->saddr) {
		sk->saddr = new_saddr;
		sk->rcv_saddr = new_saddr;
		/* Rebind the socket hash to the new local address. */
		sk->prot->rehash(sk);
	} else if (sysctl_ip_dynaddr & 2)
		printk(KERN_NOTICE "ip_rewrite_addrs(): no change needed for sock\n");

	return 1;
}
/*
 * IPIP (tunnel-mode) xfrm output: prepend an outer IPv4 header around
 * the current packet and address it with the tunnel endpoints from the
 * xfrm state.
 *
 * ECN is folded into the outer TOS via INET_ECN_encapsulate(); DF is
 * inherited from the inner header (an IP ID is selected only for
 * fragmentable packets); lifetime counters are updated under the state
 * lock.  Returns NET_XMIT_BYPASS on success, or a negative error after
 * freeing the skb.
 */
static int ipip_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct iphdr *iph, *top_iph;
	int tos, err;

	if ((err = xfrm4_tunnel_check_size(skb)) != 0)
		goto error_nolock;

	iph = skb->nh.iph;

	spin_lock_bh(&x->lock);

	tos = iph->tos;

	/* Make room for the outer header in front of the packet. */
	top_iph = (struct iphdr *) skb_push(skb, x->props.header_len);

	top_iph->ihl = 5;
	top_iph->version = 4;
	/* Propagate ECN from the inner header into the outer TOS. */
	top_iph->tos = INET_ECN_encapsulate(tos, iph->tos);
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = iph->frag_off & ~htons(IP_MF|IP_OFFSET);
	if (!(iph->frag_off & htons(IP_DF))) {
#ifdef NETIF_F_TSO
		__ip_select_ident(top_iph, dst, 0);
#else
		__ip_select_ident(top_iph, dst);
#endif
	}
	top_iph->ttl = iph->ttl;
	top_iph->protocol = IPPROTO_IPIP;
	top_iph->check = 0;
	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
	ip_send_check(top_iph);

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock_bh(&x->lock);

	/* Advance to the next dst/xfrm in the bundle. */
	if ((skb->dst = dst_pop(dst)) == NULL) {
		kfree_skb(skb);
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error_nolock:
	kfree_skb(skb);
	return err;
}
/*
 * Send an ICMP destination-unreachable with code @code on the bridge in
 * answer to @oldskb.
 *
 * Refuses to reply to non-first fragments or packets whose checksum
 * fails.  Up to 536 bytes of the offending packet are echoed back (576
 * bytes total minus the new IP + ICMP headers, per the RFC limit noted
 * below).
 */
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Echo the beginning of the offending packet as ICMP payload. */
	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	/* Checksum covers the ICMP header plus the echoed payload. */
	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
/*
 * Build and transmit an IGMP message of @type for @group to @dst on
 * @dev, with the message's code field set to @respond.
 *
 * The IPv4 header is hand-built with the Router Alert option (24-byte
 * header, ihl 6), DF set, TTL 1, and an ID chosen by ip_select_ident().
 * The IGMP header carries its own checksum.
 *
 * Returns the LOCAL_OUT hook result, or -1 on route/alloc failure.
 */
int igmp_send_report_full(struct net_device *dev, u32 group, int type,
			  u8 respond, u32 dst)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;

	if (ip_route_output(&rt, dst, 0, 0, dev->ifindex))
		return -1;
	if (rt->rt_src == 0) {
		ip_rt_put(rt);
		return -1;
	}

	skb=alloc_skb(IGMP_SIZE+dev->hard_header_len+15, GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}

	skb->dst = &rt->u.dst;

	/* Leave 16-byte-aligned room for the link-layer header. */
	skb_reserve(skb, (dev->hard_header_len+15)&~15);

	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);

	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)+4)>>2;
	iph->tos = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->daddr = dst;
	iph->saddr = rt->rt_src;
	iph->protocol = IPPROTO_IGMP;
	iph->tot_len = htons(IGMP_SIZE);
	ip_select_ident(iph, &rt->u.dst, NULL);
	/* Router Alert option (RFC 2113): type, length 4, value 0. */
	((u8*)&iph[1])[0] = IPOPT_RA;
	((u8*)&iph[1])[1] = 4;
	((u8*)&iph[1])[2] = 0;
	((u8*)&iph[1])[3] = 0;
	ip_send_check(iph);

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type=type;
	ih->code=respond;
	ih->csum=0;
	ih->group=group;
	ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       output_maybe_reroute);
}
/*
 * Reassemble IPv4 fragments for connection tracking under defrag user
 * @user; @okfn is carried for interface compatibility only.  Once a
 * complete datagram is produced, its header checksum is refreshed.
 * Returns NULL while reassembly is still in progress.
 */
static struct sk_buff *
ns_ct_ipv4_gather_frags(struct sk_buff *skb,
			int (*okfn)(struct sk_buff*), s32 user)
{
	struct sk_buff *reasm;

	/* Detach from any owning socket before reassembly. */
	skb_orphan(skb);

	local_bh_disable();
	reasm = ip_defrag(skb, user);
	local_bh_enable();

	if (reasm != NULL)
		ip_send_check(reasm->nh.iph);

	return reasm;
}
/*
 * Finalize tot_len and the IPv4 header checksum of a locally generated
 * packet and run it through the LOCAL_OUT netfilter hook towards
 * dst_output().
 *
 * On Ralink HW-NAT builds the skb is first passed to FOE_AI_UNHIT() —
 * presumably to mark it as not hitting the hardware NAT flow table;
 * confirm against the FOE_AI_UNHIT definition.
 */
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	FOE_AI_UNHIT(skb);
#endif
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
/*
 * xt_TEE target (IPv4): send an independent duplicate of the packet to
 * the gateway configured in @par->targinfo while the original continues
 * unchanged (XT_CONTINUE is returned for it in all cases).
 */
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	/* Already transmitting a TEE clone on this CPU: don't clone again. */
	if (percpu_read(tee_active))
		return XT_CONTINUE;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the TEE
	 * --gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential TEE loops
	 * between two hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		percpu_write(tee_active, true);
		ip_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
/*
 * Rewrite the talk address/port structure carried in a NATed talk
 * datagram and fix the UDP and IP checksums.  The talk protocol sends a
 * fixed-size structure, so the packet length never changes.
 *
 * ctl_addr == NULL means this is a response (only @addr is rewritten);
 * otherwise both the data address (to the reply-direction destination)
 * and the control address (to @newip/@port) are rewritten.
 * Always returns 1.
 *
 * NOTE(review): udph->check is recomputed unconditionally, so a
 * datagram originally sent with checksum 0 (disabled) gains one, and a
 * computed value of 0 is not folded to CSUM_MANGLED_0 — confirm this is
 * acceptable for the talk helper's traffic.
 */
static int mangle_packet(struct sk_buff **pskb,
			 struct ip_conntrack *ct,
			 u_int32_t newip,
			 u_int16_t port,
			 struct talk_addr *addr,
			 struct talk_addr *ctl_addr)
{
	struct iphdr *iph = (*pskb)->nh.iph;
	struct udphdr *udph = (void *)iph + iph->ihl * 4;
	size_t udplen = (*pskb)->len - iph->ihl * 4;

	/* Fortunately talk sends a structure with the address and
	   port in it. The size of the packet won't change. */

	if (ctl_addr == NULL) {
		/* response */
		if (addr->ta_addr == INADDR_ANY)
			return 1;
		DEBUGP("ip_nat_talk_mangle_packet: response orig %u.%u.%u.%u:%u, inserting %u.%u.%u.%u:%u\n",
		       NIPQUAD(addr->ta_addr), ntohs(addr->ta_port),
		       NIPQUAD(newip), ntohs(port));
		addr->ta_addr = newip;
		addr->ta_port = port;
	} else {
		/* message */
		if (addr->ta_addr != INADDR_ANY) {
			/* Change address inside packet to match way we're
			   mapping this connection. */
			DEBUGP("ip_nat_talk_mangle_packet: message orig addr %u.%u.%u.%u:%u, inserting %u.%u.%u.%u:%u\n",
			       NIPQUAD(addr->ta_addr), ntohs(addr->ta_port),
			       NIPQUAD(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip),
			       ntohs(addr->ta_port));
			addr->ta_addr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
		}
		DEBUGP("ip_nat_talk_mangle_packet: message orig ctl_addr %u.%u.%u.%u:%u, inserting %u.%u.%u.%u:%u\n",
		       NIPQUAD(ctl_addr->ta_addr), ntohs(ctl_addr->ta_port),
		       NIPQUAD(newip), ntohs(port));
		ctl_addr->ta_addr = newip;
		ctl_addr->ta_port = port;
	}

	/* Fix checksums */
	(*pskb)->csum = csum_partial((char *)udph + sizeof(struct udphdr),
				     udplen - sizeof(struct udphdr), 0);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, udplen,
					IPPROTO_UDP,
					csum_partial((char *)udph,
						     sizeof(struct udphdr),
						     (*pskb)->csum));
	ip_send_check(iph);
	return 1;
}
/*
 * Finalize tot_len and the IPv4 header checksum of a locally generated
 * packet, then run it through the LOCAL_OUT netfilter hook towards
 * dst_output().
 *
 * Vendor addition: packets whose TCP source port is htons(0x01bd)
 * (port 445, SMB) are flagged via skb->tcpf_smb so later stages can
 * identify SMB data traffic.
 */
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* Mark skb to identify SMB data packet */
	if ((ip_hdr(skb)->protocol == IPPROTO_TCP) && tcp_hdr(skb))
		skb->tcpf_smb = (tcp_hdr(skb)->source == htons(0x01bd));

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
/*
 * Reassemble IPv4 fragments for connection tracking under defrag user
 * @user.  Returns 0 once the complete datagram is in @skb (with its
 * header checksum refreshed), otherwise the non-zero result from
 * ip_defrag() (error, or the fragment was queued).
 */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	/* Orphan the skb so it is not owned by any socket during
	 * reassembly. */
	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		ip_send_check(ip_hdr(skb));

	return err;
}
/*
 * Duplicate @skb and transmit the copy towards gateway @gw (optionally
 * bound to interface @oif); the original packet is left untouched.
 * The per-CPU nf_skb_duplicated flag prevents the copy from being
 * duplicated again when it re-traverses the netfilter hooks.
 */
void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		 const struct in_addr *gw, int oif)
{
	struct iphdr *iph;

	if (this_cpu_read(nf_skb_duplicated))
		return;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential loops between two
	 * hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
		__this_cpu_write(nf_skb_duplicated, true);
		ip_local_out(net, skb->sk, skb);
		__this_cpu_write(nf_skb_duplicated, false);
	} else {
		kfree_skb(skb);
	}
}