static unsigned int nf_nat_out(unsigned int hooknum,
			       struct sk_buff *skb,
			       const struct net_device *in,
			       const struct net_device *out,
			       int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
#endif
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if ((ct->tuplehash[dir].tuple.src.u3.ip !=
		     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
		     ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all))
			return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
	}
#endif
	return ret;
}
s32 nf_session_forward_fix(struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
	struct drv_ff_cache_info *ff_info =
		&((struct ff_cache_info_ex *)ct)->ff_info[IP_CT_DIR_REPLY];

	if (ct && IP_CT_DIR_REPLY == CTINFO2DIR(skb->nfctinfo) &&
	    !(((struct ff_cache_info_ex *)skb->nfct)->ff_info[IP_CT_DIR_ORIGINAL].flag &
	      DRV_FF_FLAG_LINUX) &&
	    !is_vpn_pkt(skb)) {
		u32 out_inf = ff_info->out_inf;

		/* Decrement the TTL; drop the packet if it would expire. */
		if (iph->ttl <= 1)
			goto drop;
		ip_decrease_ttl(iph);

		/* Firewall fast path: mark the packet for L3 fast forwarding. */
		skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_L3);

		skb->dev = if_dev_get_by_index(out_inf);
		if (skb->dev == NULL)
			goto drop;

		/* Run the POST_ROUTING NAT/conntrack hooks directly. */
		if (NF_DROP == nf_nat_fn(NF_IP_POST_ROUTING, &skb, NULL,
					 skb->dev, dev_queue_xmit))
			goto drop;
		if (NF_DROP == ipv4_conntrack_help(NF_IP_POST_ROUTING, &skb, NULL,
						   skb->dev, dev_queue_xmit))
			goto drop;
		if (NF_DROP == nf_nat_adjust(NF_IP_POST_ROUTING, &skb, NULL,
					     skb->dev, dev_queue_xmit))
			goto drop;
		if (NF_DROP == ipv4_confirm(NF_IP_POST_ROUTING, &skb, NULL,
					    skb->dev, dev_queue_xmit))
			goto drop;

		if ((skb->len > 1500) && !skb_is_gso(skb))
			return ip_session_out_fragment(skb, dev_queue_xmit);

		/* Prepend the cached Ethernet header: dst MAC, src MAC, ethertype. */
		skb_push(skb, ETH_HLEN);
		memcpy(skb->data, (u8 *)ff_info->gw_ether_mac, ETH_ALEN);
		memcpy(skb->data + ETH_ALEN,
		       (u8 *)ff_info->gw_ether_mac + ETH_ALEN, ETH_ALEN);
		memcpy(skb->data + 2 * ETH_ALEN,
		       (u8 *)ff_info->gw_ether_mac + 2 * ETH_ALEN, 2);

		g_session_forward_count++;
		return dev_queue_xmit(skb);
	}
	return 2;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static unsigned int nf_nat_local_fn(unsigned int hooknum,
				    struct sk_buff *skb,
				    const struct net_device *in,
				    const struct net_device *out,
				    int (*okfn)(struct sk_buff *))
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			if (ip_route_me_harder(skb, RTN_UNSPEC))
				ret = NF_DROP;
		}
#ifdef CONFIG_XFRM
		else if (ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all)
			if (ip_xfrm_me_harder(skb))
				ret = NF_DROP;
#endif
	}
	return ret;
}
static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
				  struct sk_buff *skb,
				  const struct net_device *in,
				  const struct net_device *out,
				  int (*okfn)(struct sk_buff *))
{
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int ret;

	ret = nf_nat_fn(ops, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			if (ip_route_me_harder(skb, RTN_UNSPEC))
				ret = NF_DROP;
		}
#ifdef CONFIG_XFRM
		else if (ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all)
			if (nf_xfrm_me_harder(skb, AF_INET))
				ret = NF_DROP;
#endif
	}
	return ret;
}
static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
				       struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	enum ip_conntrack_info ctinfo __maybe_unused;
	const struct nf_conn *ct __maybe_unused;
	unsigned int ret;

	ret = nf_nat_fn(ops, skb, in, out, okfn);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.src.u3.ip !=
		    ct->tuplehash[!dir].tuple.dst.u3.ip ||
		    ct->tuplehash[dir].tuple.src.u.all !=
		    ct->tuplehash[!dir].tuple.dst.u.all)
			return nf_xfrm_me_harder(skb, AF_INET) == 0 ? ret : NF_DROP;
	}
#endif
	return ret;
}
static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	__be32 daddr = ip_hdr(skb)->daddr;
	unsigned int ret;

	ret = nf_nat_fn(ops, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    ip_hdr(skb)->daddr != daddr) {
		skb_dst_drop(skb);
	}
	return ret;
}
static unsigned int nf_nat_in(unsigned int hooknum,
			      struct sk_buff *skb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	unsigned int ret;
	__be32 daddr = ip_hdr(skb)->daddr;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    daddr != ip_hdr(skb)->daddr)
		skb_dst_drop(skb);

	return ret;
}
static unsigned int nf_nat_in(unsigned int hooknum,
			      struct sk_buff **pskb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	unsigned int ret;
	__be32 daddr = ip_hdr(*pskb)->daddr;

	ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    daddr != ip_hdr(*pskb)->daddr) {
		dst_release((*pskb)->dst);
		(*pskb)->dst = NULL;
	}
	return ret;
}
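/*
 * Illustrative sketch only, not part of the original source: how hook
 * functions with the hooknum-based signature above are typically wired
 * into netfilter through struct nf_hook_ops and nf_register_hooks().
 * The ops array, chosen hooks/priorities and the init function name are
 * assumptions modelled on the kernel's IPv4 NAT standalone code of that
 * era; the vendor or version at hand may register a different set.
 */
static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
	/* Before routing: destination NAT. */
	{
		.hook		= nf_nat_in,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_NAT_DST,
	},
	/* After routing: source NAT. */
	{
		.hook		= nf_nat_out,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_NAT_SRC,
	},
	/* Locally generated packets: destination NAT. */
	{
		.hook		= nf_nat_local_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_NAT_DST,
	},
};

static int __init nf_nat_hooks_init(void)
{
	return nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
}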