/* * Deliver IP Packets to the higher protocol layers. */ int ip_local_deliver(struct sk_buff *skb) { /* * Reassemble IP fragments. */ if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) return 0; } #if defined(CONFIG_RTL_USB_IP_HOST_SPEEDUP) || defined(CONFIG_HTTP_FILE_SERVER_SUPPORT) || defined(CONFIG_RTL_USB_UWIFI_HOST_SPEEDUP) if(isUsbIp_Reserved(skb,NF_INET_LOCAL_IN, 0)==0){ return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL, ip_local_deliver_finish); }else{ return ip_local_deliver_finish(skb); } #else return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL, ip_local_deliver_finish); #endif }
/* after ipt_filter */
/*
 * EZP NAT fast-path hook.
 *
 * For confirmed TCP/UDP conntrack entries this bypasses the normal
 * routing/NAT path: it reuses the dst_entry cached on the conntrack
 * (ct->orgdir_dst / ct->replydir_dst), applies pending DNAT via
 * nf_nat_packet(), and — when the connection is flagged EZP_IP_LOCAL_IN —
 * delivers the packet locally through ip_local_deliver_finish() (or hands
 * it to IMQ via nf_queue()).
 *
 * Returns a netfilter verdict: NF_ACCEPT to fall back to the slow path,
 * NF_DROP for untracked ICMP redirects, NF_STOLEN when the packet was
 * queued or delivered here.
 *
 * NOTE(review): this function continues past the end of this chunk; the
 * release_dst_and_return label targeted by the gotos below, and the
 * forward-path handling, are defined further down — confirm against the
 * full file.
 */
static unsigned int ezp_nat_pre_hook(unsigned int hooknum,
		struct sk_buff *skb,
		const struct net_device *indev,
		const struct net_device *outdev,
		int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	int ret = NF_ACCEPT;
	enum ip_conntrack_dir dir;
	__u32 dnat_addr = 0, snat_addr = 0;
	int* nat_flag;			/* per-direction NAT/LOCAL_IN flag bits on the conntrack */
	struct dst_entry** dst_to_use = NULL;	/* cached dst taken for this packet */
	struct iphdr *iph = ip_hdr(skb);
	struct icmphdr *hdr = icmp_hdr(skb);	/* only dereferenced when protocol is ICMP */
	struct tcphdr *tcph = tcp_hdr(skb);	/* only dereferenced when protocol is TCP */
	/* EZP: enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); */

	/* Fast path globally disabled: take the normal netfilter path. */
	if(!ezp_nat_enable_flag){
		return NF_ACCEPT;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		/* Untracked ICMP redirects are dropped outright. */
		if (iph->protocol == IPPROTO_ICMP && hdr->type == ICMP_REDIRECT)
			return NF_DROP;
		return NF_ACCEPT;
	}

	/* TCP or UDP. */
	if ((iph->protocol != IPPROTO_TCP) &&
	    (iph->protocol != IPPROTO_UDP) ) {
		return NF_ACCEPT;
	}

	/* Pure SYN (no RST) goes through the slow path so conntrack/NAT can
	 * be established normally. */
	if ((iph->protocol == IPPROTO_TCP) &&
	    ((tcp_flag_word(tcph) & (TCP_FLAG_RST | TCP_FLAG_SYN)) == TCP_FLAG_SYN)) {
		return NF_ACCEPT;
	}

	/* Make sure it is confirmed. */
	if (!nf_ct_is_confirmed(ct)) {
		return NF_ACCEPT;
	}

	/* We comment out the ESTABLISHED-state check since
	 * 1. conntrack establishing is a 2 way process, but after routing, we have
	 *    established routing entry and address resolution table, so we don't
	 *    need to check ESTABLISH state.
	 * 2. With establishing state, we need to go through forward state and
	 *    routing several times. It may occur that our holded entry may be
	 *    replaced.
	 */
	/* if ((ctinfo != IP_CT_ESTABLISHED) &&
	    (ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)) {
		return NF_ACCEPT;
	} */

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL) {
		/* Original direction: need a cached dst on the conntrack. */
		if (!ct->orgdir_dst) {
			return NF_ACCEPT;
		} else {
			nat_flag = &ct->orgdir_rid;
			/* No SNAT/DNAT/LOCAL_IN work recorded for this
			 * direction: nothing for the fast path to do. */
			if (!(*nat_flag & ((1 << IP_NAT_MANIP_DST) |
			    (1 << IP_NAT_MANIP_SRC) |
			    (1 << EZP_IP_LOCAL_IN)))) {
				return NF_ACCEPT;
			}
			/* Check only in forward case and ignore input case */
			if (!(*nat_flag & (1 << EZP_IP_LOCAL_IN))) {
				if ((!ct->orgdir_dst->hh) &&
				    (!ct->orgdir_dst->neighbour)) {
					printk("%s:orig dst and neighbour null dir\n",__FUNCTION__);
					return NF_ACCEPT;
				}
			}
			if (skb->dst) {
				/* skb might has its own dst already.
				 * e.g. output to local input */
				dst_release(skb->dst);
			}
			skb->protocol = htons(ETH_P_IP);
			skb->dst = ct->orgdir_dst;
			/* XXX: */
			skb->dev = ct->orgdir_dst->dev;
			/* skb uses this dst_entry */
			dst_use(skb->dst, jiffies);
			dst_to_use = &ct->orgdir_dst;
		}
	} else { /* IP_CT_DIR_REPLY */
		/* Reply direction: mirror of the block above using the
		 * reply-side cached dst and flags. */
		if (!ct->replydir_dst) {
			return NF_ACCEPT;
		} else {
			nat_flag = &ct->replydir_rid;
			if (!(*nat_flag & ((1 << IP_NAT_MANIP_DST) |
			    (1 << IP_NAT_MANIP_SRC) |
			    (1 << EZP_IP_LOCAL_IN)))) {
				return NF_ACCEPT;
			}
			/* Check only in forward case and ignore input case */
			if (!(*nat_flag & (1 << EZP_IP_LOCAL_IN))) {
				if ((!ct->replydir_dst->hh) &&
				    (!ct->replydir_dst->neighbour)) {
					printk("%s:reply dst and neighbour null\n",__FUNCTION__);
					return NF_ACCEPT;
				}
			}
			if (skb->dst) {
				/* skb might has its own dst already. */
				/* e.g. output to local input */
				dst_release(skb->dst);
			}
			skb->protocol = htons(ETH_P_IP);
			skb->dst = ct->replydir_dst;
			/* XXX: */
			skb->dev = ct->replydir_dst->dev;
			/* skb uses this dst_entry */
			dst_use(skb->dst, jiffies);
			dst_to_use = &ct->replydir_dst;
		}
	}

	/* After this point, every "return NF_ACCEPT" action need to release
	 * holded dst entry. So we use "goto release_dst_and_return" to handle
	 * the action commonly.
	 */
	/* EZP: if (!nf_nat_initialized(ct, maniptype)) {
		goto release_dst_and_return;
	} */

	/* If we have helper, we need to go original path until conntrack
	 * confirmed */
	if(nfct_help(ct)){
		goto release_dst_and_return;
	}

	/* Propagate the per-direction IMQ flags recorded on the conntrack. */
	if (dir == IP_CT_DIR_ORIGINAL) {
		(skb)->imq_flags = ct->ct_orig_imq_flags;
	} else{
		(skb)->imq_flags = ct->ct_repl_imq_flags;
	}

	/* PRE_ROUTING NAT */
	/* Assume DNAT conntrack is ready. */
	if ((*nat_flag & (1 << IP_NAT_MANIP_DST))){
		dnat_addr = iph->daddr;
		ret = nf_nat_packet(ct, ctinfo, NF_INET_PRE_ROUTING, skb);
		if (ret != NF_ACCEPT) {
			goto release_dst_and_return;
		}
		/* Destination address unchanged by nf_nat_packet(): no DNAT
		 * actually applies, so clear the flag for later packets. */
		if (dnat_addr == iph->daddr) {
			*nat_flag &= ~(1 << IP_NAT_MANIP_DST);
		}
	}

	/* INPUT */
	if ((*nat_flag & (1 << EZP_IP_LOCAL_IN))){
		/* TODO: use ip_local_deliver_finish() and add ip_defrag(). */
		/* XXX: Not sure this will hit or not. */
		/*
		 * Reassemble IP fragments.
		 */
		if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
			if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) {
				/* If return value is not 0, defrag error */
				/* return 0; */
				/* XXX: return NF_STOLEN? */
				goto release_dst_and_return;
			}
		}
		/* For INPUT path, there is no need to check dst_mtu but defrag.
		if (skb->len > dst_mtu(&((struct rtable*)skb->dst)->u.dst)) {
			goto release_dst_and_return;
		}*/
		if (ezp_nat_queue_enable_flag) {
			if ((skb)->imq_flags & IMQ_F_ENQUEUE) {
				struct nf_hook_ops *elem = nf_get_imq_ops();
				/* With to apply IMQ, we have to check the IMQ
				 * flag, if the flag is set, we have to enquene
				 * this skb and leave it to IMQ */
				if (elem != NULL) {
					nf_queue(skb, (struct list_head*)elem,
					    AF_INET, NF_INET_POST_ROUTING,
					    (struct net_device*)indev,
					    (struct net_device*)
					    ((struct rtable*)skb->dst)->u.dst.dev,
					    ip_local_deliver_finish,
					    NF_ACCEPT >> NF_VERDICT_BITS);
					return NF_STOLEN;
				}
			}
		}
		/* Deliver locally; the skb is consumed either way, so steal it
		 * from the hook chain. */
		ret = ip_local_deliver_finish(skb);
		return NF_STOLEN;
	}