/* Returns new sk_buff, or NULL */
static struct sk_buff *
nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	skb_orphan(skb);

	local_bh_disable();
	skb = ip_defrag(skb, user);
	local_bh_enable();

	if (skb)
		ip_send_check(skb->nh.iph);

	return skb;
}
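A note on the calling convention above: in this older, pointer-returning variant, ip_defrag() consumes the skb and returns NULL until the final fragment arrives, so the caller must replace its own reference. A minimal, hypothetical call site (verdicts chosen for illustration only) would look like:

	skb = nf_ct_ipv4_gather_frags(skb, IP_DEFRAG_CONNTRACK_IN);
	if (skb == NULL)
		return NF_STOLEN;	/* fragment queued; nothing to do yet */
	/* skb now holds the fully reassembled datagram */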
Example #2
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
Example #3
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
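The only functional change from the previous example is the ip_is_fragment() helper, which wraps the open-coded frag_off test. Its definition in include/net/ip.h is essentially:

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}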
Example #4
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		skb->local_df = 1;

	return err;
}
Example #5
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */ 
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb);
		if (!skb)
			return 0;
	}

	return NF_HOOK(PF_INET, NF_IP_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
Example #6
/* Returns 0 if a complete datagram was assembled, or a negative error */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;
	/* Make the packet an orphan, owned by no socket */
	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err)
		ip_send_check(ip_hdr(skb));

	return err;
}
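ip_send_check() is needed here because reassembly rewrites tot_len and frag_off in the IPv4 header, invalidating the old checksum. Its kernel definition is essentially:

void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}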
Example #7
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
		int err;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
		struct sk_buff *reasm;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		reasm = nf_ct_frag6_gather(net, skb, user);
		if (!reasm)
			return -EINPROGRESS;

		if (skb == reasm)
			return -EINVAL;

		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
		skb_morph(skb, reasm);
		consume_skb(reasm);
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#else
		return -EPFNOSUPPORT;
#endif
	} else {
		return -EPFNOSUPPORT;
	}

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}
Example #8
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
	int err;

	skb_orphan(skb);

	local_bh_disable();
	err = ip_defrag(skb, user);
	local_bh_enable();

	if (!err) {
		ip_send_check(ip_hdr(skb));
		skb->ignore_df = 1;
	}

	return err;
}
Example #9
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);
	int err;

	if (!skb->dev) {
		OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
		return -EINVAL;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		skb_orphan(skb);
		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err)
			return err;

		key->ip.proto = ipv6_hdr(skb)->nexthdr;
		ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_GSO_CB(skb) = ovs_cb;

	return 0;
}
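Given the return convention documented above, a caller sketch (hypothetical, for illustration only) has to distinguish the stolen case from the freed case:

	err = handle_fragments(net, key, zone, skb);
	if (err == -EINPROGRESS)
		return 0;	/* skb stolen by the frag queue; must not be freed */
	else if (err)
		return err;	/* skb was already freed on the error path */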
Example #10
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
#if defined (CONFIG_HNAT_V2)
	if (ip_hdr(skb)->protocol != IPPROTO_IPV6)
#endif
	FOE_ALG_MARK(skb);
#endif
	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
Example #11
int ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;

	read_lock(&ip_ra_lock);
	for (ra = ip_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came  from that interface.
		 */
		if (sk && inet_sk(sk)->num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex) &&
		    sock_net(sk) == dev_net(dev)) {
			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
					read_unlock(&ip_ra_lock);
					return 1;
				}
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		read_unlock(&ip_ra_lock);
		return 1;
	}
	read_unlock(&ip_ra_lock);
	return 0;
}
Example #12
/*
 *	It is hooked at the NF_IP_FORWARD chain, in order to catch ICMP
 *      packets destined for 0.0.0.0/0.
 *      When fwmark-based virtual service is used, such as transparent
 *      cache cluster, TCP packets can be marked and routed to ip_vs_in,
 *      but ICMP destined for 0.0.0.0/0 cannot be easily marked and
 *      sent to ip_vs_in_icmp. So, catch them at the NF_IP_FORWARD chain
 *      and send them to ip_vs_in_icmp.
 */
static unsigned int ip_vs_forward_icmp(unsigned int hooknum,
				       struct sk_buff **skb_p,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct sk_buff	*skb = *skb_p;
	struct iphdr	*iph = skb->nh.iph;

	if (iph->protocol != IPPROTO_ICMP)
		return NF_ACCEPT;

	if (iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb, IP_DEFRAG_VS_FWD);
		if (!skb)
			return NF_STOLEN;
		*skb_p = skb;
	}

	return ip_vs_in_icmp(skb_p);
}
Example #13
/*
 *	Process Router Attention IP option
 */ 
int ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = skb->nh.iph->protocol;
	struct sock *last = NULL;

	read_lock(&ip_ra_lock);
	for (ra = ip_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came  from that interface.
		 */
		if (sk && sk->num == protocol 
		    && ((sk->bound_dev_if == 0) 
			|| (sk->bound_dev_if == skb->dev->ifindex))) {
			if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
				skb = ip_defrag(skb);
				if (skb == NULL) {
					read_unlock(&ip_ra_lock);
					return 1;
				}
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		read_unlock(&ip_ra_lock);
		return 1;
	}
	read_unlock(&ip_ra_lock);
	return 0;
}
Example #14
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */

	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}
#if defined(CONFIG_RTL_USB_IP_HOST_SPEEDUP) || defined(CONFIG_HTTP_FILE_SERVER_SUPPORT) || defined(CONFIG_RTL_USB_UWIFI_HOST_SPEEDUP)
	if(isUsbIp_Reserved(skb,NF_INET_LOCAL_IN, 0)==0){
		return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL, ip_local_deliver_finish);
	}else{
		return ip_local_deliver_finish(skb);
	}

#else
	return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
#endif
}
Example #15
int
//! ip_defrag_stub(struct ip *iph, struct ip **defrag)
ip_defrag_stub(struct ip *iph, struct ip **defrag,IP_THREAD_LOCAL_P  ip_thread_local_p)
{
	int offset, flags, tot_len;
	struct sk_buff *skb;

//!   numpack++;
	ip_thread_local_p->numpack++;
//!   timenow = 0;
	ip_thread_local_p->timenow = 0;
//!   while (timer_head && timer_head->expires < jiffies()) {
	while (ip_thread_local_p->timer_head && ip_thread_local_p->timer_head->expires < jiffies(ip_thread_local_p)) {
//!     this_host = ((struct ipq *) (timer_head->data))->hf;
		ip_thread_local_p->this_host = ((struct ipq *) (ip_thread_local_p->timer_head->data))->hf;
//!     timer_head->function(timer_head->data);
		ip_thread_local_p->timer_head->function(ip_thread_local_p->timer_head->data);
	}
	offset = ntohs(iph->ip_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	if (((flags & IP_MF) == 0) && (offset == 0)) {
		//     ip_defrag(iph, 0, ip_thread_local_p);
		return IPF_NOTF;
	}
	tot_len = ntohs(iph->ip_len);
	skb = (struct sk_buff *) malloc(tot_len + sizeof(struct sk_buff));
	if (!skb)
		nids_params.no_mem("ip_defrag_stub");
	skb->data = (char *) (skb + 1);
	memcpy(skb->data, iph, tot_len);
	skb->truesize = tot_len + 16 + nids_params.dev_addon;
	skb->truesize = (skb->truesize + 15) & ~15;
	skb->truesize += nids_params.sk_buff_size;

	if ((*defrag = (struct ip *)ip_defrag((struct ip *) (skb->data), skb, ip_thread_local_p)))
		return IPF_NEW;

	return IPF_ISF;
}
Example #16
/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came  from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex) &&
		    net_eq(sock_net(sk), net)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
Example #17
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb->rxhash = 0;
		}
	}
	return skb;
}
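ip_check_defrag() exists for taps that see fragments before the stack proper does; the AF_PACKET fanout code, for instance, calls it roughly like this (a sketch, not the exact call site):

	skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
	if (!skb)
		return 0;	/* fragment absorbed by the defrag queue */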
Example #18
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;
#if 0
	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
		return skb;
#else
	skb_copy_from_linear_data_offset(skb, 0, &iph, sizeof(iph));
#endif

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, iph.ihl*4))
				return skb;
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
Example #19
static int ip_rcv(struct sk_buff *skb, struct net_device *dev) {
	net_device_stats_t *stats = &dev->stats;
	const struct net_proto *nproto;
	iphdr_t *iph = ip_hdr(skb);
	__u16 old_check;
	size_t ip_len;
	int optlen;
	sk_buff_t *complete_skb;

	/**
	 *   RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *   Is the datagram acceptable?
	 *   1.  Length at least the size of an ip header
	 *   2.  Version of 4
	 *   3.  Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *   4.  Doesn't have a bogus length
	 */
	if (skb->len < dev->hdr_len + IP_MIN_HEADER_SIZE
			|| IP_HEADER_SIZE(iph) < IP_MIN_HEADER_SIZE
			|| skb->len < dev->hdr_len + IP_HEADER_SIZE(iph)) {
		DBG(printk("ip_rcv: invalid IPv4 header length\n"));
		stats->rx_length_errors++;
		skb_free(skb);
		return 0; /* error: invalid header length */
	}


	if (iph->version != 4) {
		DBG(printk("ip_rcv: invalid IPv4 version\n"));
		stats->rx_err++;
		skb_free(skb);
		return 0; /* error: not ipv4 */
	}

	old_check = iph->check;
	ip_set_check_field(iph);
	if (old_check != iph->check) {
		DBG(printk("ip_rcv: invalid checksum %hx(%hx)\n",
				ntohs(old_check), ntohs(iph->check)));
		stats->rx_crc_errors++;
		skb_free(skb);
		return 0; /* error: invalid crc */
	}

	ip_len = ntohs(iph->tot_len);
	if (ip_len < IP_HEADER_SIZE(iph)
			|| skb->len < dev->hdr_len + ip_len) {
		DBG(printk("ip_rcv: invalid IPv4 length\n"));
		stats->rx_length_errors++;
		skb_free(skb);
		return 0; /* error: invalid length */
	}

	/* Setup transport layer (L4) header */
	skb->h.raw = skb->nh.raw + IP_HEADER_SIZE(iph);

	/* Validating */
	if (0 != nf_test_skb(NF_CHAIN_INPUT, NF_TARGET_ACCEPT, skb)) {
		DBG(printk("ip_rcv: dropped by input netfilter\n"));
		stats->rx_dropped++;
		skb_free(skb);
		return 0; /* error: dropped */
	}

	/* Forwarding */
	assert(skb->dev);
	assert(inetdev_get_by_dev(skb->dev));
	if (inetdev_get_by_dev(skb->dev)->ifa_address != 0) {
		/**
		 * FIXME
		 * This check needed for BOOTP protocol
		 * disable forwarding if interface is not set yet
		 */
		/**
		 * Check the destination address, and if it doesn't match
		 * any of own addresses, retransmit packet according to the routing table.
		 */
		if (!ip_is_local(iph->daddr, IP_LOCAL_BROADCAST)) {
			if (0 != nf_test_skb(NF_CHAIN_FORWARD, NF_TARGET_ACCEPT, skb)) {
				DBG(printk("ip_rcv: dropped by forward netfilter\n"));
				stats->rx_dropped++;
				skb_free(skb);
				return 0; /* error: dropped */
			}
			return ip_forward(skb);
		}
	}

	memset(skb->cb, 0, sizeof(skb->cb));
	optlen = IP_HEADER_SIZE(iph) - IP_MIN_HEADER_SIZE;
	if (optlen > 0) {
		/* NOTE : maybe it'd be better to copy skb here,
		 * 'cause options may cause modifications
		 * but smart people who wrote linux kernel
		 * say that this is extremely rarely needed
		 */
		ip_options_t *opts = (ip_options_t*)(skb->cb);

		memset(skb->cb, 0, sizeof(skb->cb));
		opts->optlen = optlen;
		if (ip_options_compile(skb, opts)) {
			DBG(printk("ip_rcv: invalid options\n"));
			stats->rx_err++;
			skb_free(skb);
			return 0; /* error: bad ops */
		}
		if (ip_options_handle_srr(skb)) {
			DBG(printk("ip_rcv: can't handle options\n"));
			stats->tx_err++;
			skb_free(skb);
			return 0; /* error: can't handle ops */
		}
	}

	/* It's very useful for us to have complete packet even for forwarding
	 * (we may apply any filter, we may perform NAT etc),
	 * but it'll break routing if different parts of a fragmented
	 * packet will use different routes. So they can't be assembled.
	 * See RFC 1812 for details
	 */
	if (ntohs(skb->nh.iph->frag_off) & (IP_MF | IP_OFFSET)) {
		complete_skb = ip_defrag(skb);
		if (complete_skb == NULL) {
			return 0; /* fragment queued; wait for the rest */
		}
		skb = complete_skb;
		iph = ip_hdr(complete_skb);
	}

	/* When a packet is received, it is passed to any raw sockets
	 * which have been bound to its protocol or to socket with concrete protocol */
	raw_rcv(skb);

	nproto = net_proto_lookup(ETH_P_IP, iph->proto);
	if (nproto != NULL) {
		return nproto->handle(skb);
	}

	DBG(printk("ip_rcv: unknown protocol\n"));
	skb_free(skb);
	return 0; /* error: nobody wants this packet */
}
Example #20
/* after ipt_filter */
static unsigned int ezp_nat_pre_hook(unsigned int hooknum, 
        struct sk_buff *skb, const struct net_device *indev,
        const struct net_device *outdev, 
        int (*okfn)(struct sk_buff *))
{
    struct nf_conn *ct;
    enum ip_conntrack_info ctinfo;
    int ret = NF_ACCEPT;
    enum ip_conntrack_dir dir;
    __u32 dnat_addr = 0, snat_addr = 0;    
    int* nat_flag;
    struct dst_entry** dst_to_use = NULL;
    struct iphdr *iph = ip_hdr(skb);
    struct icmphdr *hdr = icmp_hdr(skb);
    struct tcphdr *tcph = tcp_hdr(skb);
    /* EZP: enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); */

    if(!ezp_nat_enable_flag){
        return NF_ACCEPT;
    }
    ct = nf_ct_get(skb, &ctinfo);
    if (!ct) {
        if (iph->protocol == IPPROTO_ICMP
                && hdr->type == ICMP_REDIRECT)
            return NF_DROP;
        return NF_ACCEPT;
    }
    
    /* TCP or UDP. */
    if ((iph->protocol != IPPROTO_TCP) &&
            (iph->protocol != IPPROTO_UDP) ) {
        return NF_ACCEPT;
    }
    if ((iph->protocol == IPPROTO_TCP) && 
            ((tcp_flag_word(tcph) & (TCP_FLAG_RST | TCP_FLAG_SYN)) == 
             TCP_FLAG_SYN)) {
        return NF_ACCEPT;
    }
    /* Make sure it is confirmed. */
    if (!nf_ct_is_confirmed(ct)) {
        return NF_ACCEPT;
    } 
    /* We comment out this part since
     * 1. conntrack establishing is a 2-way process, but after routing we
     * already have an established routing entry and address resolution
     * table, so we don't need to check the ESTABLISHED state.
     * 2. With establishing state, we would need to go through forward state
     * and routing several times, so our held entry might be replaced. */
    /*
    if ((ctinfo != IP_CT_ESTABLISHED) &&
        (ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)) {
        return NF_ACCEPT;
    }
    */
    dir = CTINFO2DIR(ctinfo);
    if (dir == IP_CT_DIR_ORIGINAL) {
        if (!ct->orgdir_dst) {
            return NF_ACCEPT;
        } else {
            nat_flag = &ct->orgdir_rid;
            if (!(*nat_flag & ((1 << IP_NAT_MANIP_DST) | 
			(1 << IP_NAT_MANIP_SRC) |
			(1 << EZP_IP_LOCAL_IN)))) {
                return NF_ACCEPT;
            }
            /* Check only in forward case and ignore input case */
            if (!(*nat_flag & (1 << EZP_IP_LOCAL_IN))) {
                if ((!ct->orgdir_dst->hh) && (!ct->orgdir_dst->neighbour)) {
                    printk("%s:orig dst and neighbour null dir\n",__FUNCTION__);
                    return NF_ACCEPT;
                }
            }
            if (skb->dst) {
                /* skb might have its own dst already,
                 * e.g. output to local input */
                dst_release(skb->dst);
            } 
            skb->protocol = htons(ETH_P_IP);
            skb->dst = ct->orgdir_dst;
            /* XXX: */
            skb->dev = ct->orgdir_dst->dev;
            /* skb uses this dst_entry */
            dst_use(skb->dst, jiffies);
            dst_to_use = &ct->orgdir_dst;
        }
    } else {
        /* IP_CT_DIR_REPLY */
        if (!ct->replydir_dst) {
            return NF_ACCEPT;
        } else {
            nat_flag = &ct->replydir_rid;
            if (!(*nat_flag & ((1 << IP_NAT_MANIP_DST) | 
			(1 << IP_NAT_MANIP_SRC) |
			(1 << EZP_IP_LOCAL_IN)))) {
                return NF_ACCEPT;
            }
            /* Check only in forward case and ignore input case */
            if (!(*nat_flag & (1 << EZP_IP_LOCAL_IN))) {
                if ((!ct->replydir_dst->hh) && (!ct->replydir_dst->neighbour)) {
                    printk("%s:reply dst and neighbour null\n",__FUNCTION__);
                    return NF_ACCEPT;
                }
            } 
            if (skb->dst) {
                /* skb might have its own dst already. */
                /* e.g. output to local input */
                dst_release(skb->dst);
            } 
            skb->protocol = htons(ETH_P_IP);
            skb->dst = ct->replydir_dst;
            /* XXX: */
            skb->dev = ct->replydir_dst->dev;
            /* skb uses this dst_entry */
            dst_use(skb->dst, jiffies);
            dst_to_use = &ct->replydir_dst;
        }
    }

    /* After this point, every "return NF_ACCEPT" action need to release
     * holded dst entry. So we use "goto release_dst_and_return" to handle the
     * action commonly. */
    /* EZP:
    if (!nf_nat_initialized(ct, maniptype)) {
        goto release_dst_and_return;
    }
    */
    /* If we have helper, we need to go original path until conntrack
     * confirmed */
    if(nfct_help(ct)){
        goto release_dst_and_return;
    }

    if (dir == IP_CT_DIR_ORIGINAL) {
        (skb)->imq_flags = ct->ct_orig_imq_flags;
    }
    else{
        (skb)->imq_flags = ct->ct_repl_imq_flags;
    }

    /* PRE_ROUTING NAT */
    /* Assume DNAT conntrack is ready. */
    if ((*nat_flag & (1 << IP_NAT_MANIP_DST))){
        dnat_addr = iph->daddr;
        ret = nf_nat_packet(ct, ctinfo, NF_INET_PRE_ROUTING, skb);
        if (ret != NF_ACCEPT) {
            goto release_dst_and_return;
        }
        if (dnat_addr == iph->daddr) {
            *nat_flag &= ~(1 << IP_NAT_MANIP_DST);
        }
    } 
    /* INPUT */
    if ((*nat_flag & (1 << EZP_IP_LOCAL_IN))){
        /* TODO: use ip_local_deliver_finish() and add ip_defrag(). */
        /* XXX: Not sure this will hit or not. */
        /*
         *	Reassemble IP fragments.
         */

        if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
            if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) {
                /* If return value is not 0, defrag error */
                /* return 0; */
                /* XXX: return NF_STOLEN? */
                goto release_dst_and_return;
            }
        }
        /* For INPUT path, there is no need to check dst_mtu but defrag.
        if (skb->len > dst_mtu(&((struct rtable*)skb->dst)->u.dst)) {
            goto release_dst_and_return;
        }*/
        if (ezp_nat_queue_enable_flag) {
            if ((skb)->imq_flags & IMQ_F_ENQUEUE) {
                struct nf_hook_ops *elem = nf_get_imq_ops();
                /* To apply IMQ we have to check the IMQ flag; if it is set,
                 * we have to enqueue this skb and leave it to IMQ */
                if (elem != NULL) {
                    nf_queue(skb, (struct list_head*)elem, AF_INET,
                            NF_INET_POST_ROUTING,
                            (struct net_device*)indev,
                            (struct net_device*)
                                ((struct rtable*)skb->dst)->u.dst.dev,
                            ip_local_deliver_finish, NF_ACCEPT >> NF_VERDICT_BITS);
                    return NF_STOLEN;
                }
            }
        }
        ret = ip_local_deliver_finish(skb);
        return NF_STOLEN;
    }
Example #21
/*
 *	It is hooked at the NF_IP_FORWARD chain, used only for VS/NAT.
 *	Check if outgoing packet belongs to the established ip_vs_conn,
 *      rewrite addresses of the packet and send it on its way...
 */
static unsigned int ip_vs_out(unsigned int hooknum,
			      struct sk_buff **skb_p,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	struct sk_buff  *skb = *skb_p;
	struct iphdr	*iph;
	union ip_vs_tphdr h;
	struct ip_vs_conn *cp;
	int size;
	int ihl;

	EnterFunction(11);

	if (skb->nfcache & NFC_IPVS_PROPERTY)
		return NF_ACCEPT;

	iph = skb->nh.iph;
	if (iph->protocol == IPPROTO_ICMP)
		return ip_vs_out_icmp(skb_p);

	/* let it go if other IP protocols */
	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return NF_ACCEPT;

	/* reassemble IP fragments */
	if (iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb, IP_DEFRAG_VS_OUT);
		if (!skb)
			return NF_STOLEN;
		iph = skb->nh.iph;
		*skb_p = skb;
	}

	/* make sure that protocol header available in skb data area,
	   note that skb data area may be reallocated. */
	ihl = iph->ihl << 2;
	if (ip_vs_header_check(skb, iph->protocol, ihl) == -1)
		return NF_DROP;

	iph = skb->nh.iph;
	h.raw = (char*) iph + ihl;

	/*
	 *	Check if the packet belongs to an old entry
	 */
	cp = ip_vs_conn_out_get(iph->protocol, iph->saddr, h.portp[0],
				iph->daddr, h.portp[1]);
	if (!cp) {
		if (sysctl_ip_vs_nat_icmp_send &&
		    ip_vs_lookup_real_service(iph->protocol,
					      iph->saddr, h.portp[0])) {
			/*
			 * Notify the real server: there is no existing
			 * entry if it is not RST packet or not TCP packet.
			 */
			if (!h.th->rst || iph->protocol != IPPROTO_TCP) {
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PORT_UNREACH, 0);
				kfree_skb(skb);
				return NF_STOLEN;
			}
		}
		IP_VS_DBG(12, "packet for %s %d.%d.%d.%d:%d "
			  "continue traversal as normal.\n",
			  ip_vs_proto_name(iph->protocol),
			  NIPQUAD(iph->daddr),
			  ntohs(h.portp[1]));
		if (skb_is_nonlinear(skb))
			ip_send_check(iph);
		return NF_ACCEPT;
	}

	/*
	 * If it has ip_vs_app helper, the helper may change the payload,
	 * so it needs full checksum checking and checksum calculation.
	 * If not, only the header (addr/port) is changed, so it is fast
	 * to do incremental checksum update, and let the destination host
	 * do final checksum checking.
	 */

	if (cp->app && skb_is_nonlinear(skb)) {
		if (skb_linearize(skb, GFP_ATOMIC) != 0) {
			ip_vs_conn_put(cp);
			return NF_DROP;
		}
		iph = skb->nh.iph;
		h.raw = (char*) iph + ihl;
	}

	size = skb->len - ihl;
	IP_VS_DBG(11, "O-pkt: %s size=%d\n",
		  ip_vs_proto_name(iph->protocol), size);

	/* do TCP/UDP checksum checking if it has application helper */
	if (cp->app && (iph->protocol != IPPROTO_UDP || h.uh->check != 0)) {
		switch (skb->ip_summed) {
		case CHECKSUM_NONE:
			skb->csum = csum_partial(h.raw, size, 0);
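			/* fall through: the freshly computed csum is verified below */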
		case CHECKSUM_HW:
			if (csum_tcpudp_magic(iph->saddr, iph->daddr, size,
					      iph->protocol, skb->csum)) {
				ip_vs_conn_put(cp);
				IP_VS_DBG_RL("Outgoing failed %s checksum "
					     "from %d.%d.%d.%d (size=%d)!\n",
					     ip_vs_proto_name(iph->protocol),
					     NIPQUAD(iph->saddr),
					     size);
				return NF_DROP;
			}
			break;
		default:
			/* CHECKSUM_UNNECESSARY */
			break;
		}
	}

	IP_VS_DBG(11, "Outgoing %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d\n",
		  ip_vs_proto_name(iph->protocol),
		  NIPQUAD(iph->saddr), ntohs(h.portp[0]),
		  NIPQUAD(iph->daddr), ntohs(h.portp[1]));

	/* mangle the packet */
	iph->saddr = cp->vaddr;
	h.portp[0] = cp->vport;

	/*
	 *	Call application helper if needed
	 */
	if (ip_vs_app_pkt_out(cp, skb) != 0) {
		/* skb data has probably changed, update pointers */
		iph = skb->nh.iph;
		h.raw = (char*)iph + ihl;
		size = skb->len - ihl;
	}

	/*
	 *	Adjust TCP/UDP checksums
	 */
	if (!cp->app && (iph->protocol != IPPROTO_UDP || h.uh->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		ip_vs_fast_check_update(&h, cp->daddr, cp->vaddr,
					cp->dport, cp->vport, iph->protocol);
		if (skb->ip_summed == CHECKSUM_HW)
			skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		switch (iph->protocol) {
		case IPPROTO_TCP:
			h.th->check = 0;
			skb->csum = csum_partial(h.raw, size, 0);
			h.th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							size, iph->protocol,
							skb->csum);
			IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
				  ip_vs_proto_name(iph->protocol), h.th->check,
				  (char*)&(h.th->check) - (char*)h.raw);
			break;
		case IPPROTO_UDP:
			h.uh->check = 0;
			skb->csum = csum_partial(h.raw, size, 0);
			h.uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							size, iph->protocol,
							skb->csum);
			if (h.uh->check == 0)
				h.uh->check = 0xFFFF;
			IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
				  ip_vs_proto_name(iph->protocol), h.uh->check,
				  (char*)&(h.uh->check) - (char*)h.raw);
			break;
		}
	}
	ip_send_check(iph);

	ip_vs_out_stats(cp, skb);
	ip_vs_set_state(cp, VS_STATE_OUTPUT, iph, h.portp);
	ip_vs_conn_put(cp);

	skb->nfcache |= NFC_IPVS_PROPERTY;

	LeaveFunction(11);
	return NF_ACCEPT;
}
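The "fast csum update" branch above rests on the one's-complement arithmetic of RFC 1624: when a 16-bit word changes from old to new, the checksum can be patched without re-summing the payload. A self-contained sketch of that identity (an illustration, not the kernel's actual ip_vs_fast_check_update()):

static inline unsigned short
csum_patch16(unsigned short check, unsigned short old, unsigned short new)
{
	unsigned long sum;

	/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') */
	sum = (~check & 0xffff) + (~old & 0xffff) + new;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry once */
	return ~(sum + (sum >> 16)) & 0xffff;	/* fold again, complement */
}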
Example #22
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */ 
int ip_local_deliver(struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;
	struct inet_protocol *ipprot;
	struct sock *raw_sk=NULL;
	unsigned char hash;
	int flag = 0;

#ifndef CONFIG_IP_ALWAYS_DEFRAG
	/*
	 *	Reassemble IP fragments.
	 */

	if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb);
		if (!skb)
			return 0;
		iph = skb->nh.iph;
	}
#endif

#ifdef CONFIG_IP_MASQUERADE
	/*
	 * Do we need to de-masquerade this packet?
	 */
        {
		int ret = ip_fw_demasquerade(&skb);
		if (ret < 0) {
			kfree_skb(skb);
			return 0;
		}

		if (ret) {
			iph=skb->nh.iph;
			IPCB(skb)->flags |= IPSKB_MASQUERADED;
			dst_release(skb->dst);
			skb->dst = NULL;
			if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, skb->dev)) {
				kfree_skb(skb);
				return 0;
			}
			return skb->dst->input(skb);
		}
        }
#endif

        /*
	 *	Point into the IP datagram, just past the header.
	 */

        skb->h.raw = skb->nh.raw + iph->ihl*4;

	/*
	 *	Deliver to raw sockets. This is fun as to avoid copies we want to make no 
	 *	surplus copies.
	 *
	 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
	 *	-> It does. And not only TOS, but all IP header.
	 */
 
	/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
	hash = iph->protocol & (MAX_INET_PROTOS - 1);

	/* 
	 *	If there maybe a raw socket we must check - if not we don't care less 
	 */
		 
	if((raw_sk = raw_v4_htable[hash]) != NULL) {
		struct sock *sknext = NULL;
		struct sk_buff *skb1;
		raw_sk = raw_v4_lookup(raw_sk, iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex);
		if(raw_sk) {	/* Any raw sockets */
			do {
				/* Find the next */
				sknext = raw_v4_lookup(raw_sk->next, iph->protocol,
						       iph->saddr, iph->daddr, skb->dev->ifindex);
				if (iph->protocol != IPPROTO_ICMP || !icmp_filter(raw_sk, skb)) {
					if (sknext == NULL)
						break;
					skb1 = skb_clone(skb, GFP_ATOMIC);
					if(skb1)
					{
						raw_rcv(raw_sk, skb1);
					}
				}
				raw_sk = sknext;
			} while(raw_sk!=NULL);
				
			/*	Here either raw_sk is the last raw socket, or NULL if
			 *	none.  We deliver to the last raw socket AFTER the
			 *	protocol checks as it avoids a surplus copy.
			 */
		}
	}
	
	/*
	 *	skb->h.raw now points at the protocol beyond the IP header.
	 */
	
	for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
	{
		struct sk_buff *skb2;
	
		if (ipprot->protocol != iph->protocol)
			continue;
		/*
		 * 	See if we need to make a copy of it.  This will
		 * 	only be set if more than one protocol wants it.
		 * 	and then not for the last one. If there is a pending
		 *	raw delivery wait for that
		 */
	
		if (ipprot->copy || raw_sk)
		{
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if(skb2==NULL)
				continue;
		}
		else
		{
			skb2 = skb;
		}
		flag = 1;

		/*
		 *	Pass on the datagram to each protocol that wants it,
		 *	based on the datagram protocol.  We should really
		 *	check the protocol handler's return values here...
		 */

		ipprot->handler(skb2, ntohs(iph->tot_len) - (iph->ihl * 4));
	}

	/*
	 *	All protocols checked.
	 *	If this packet was a broadcast, we may *not* reply to it, since that
	 *	causes (proven, grin) ARP storms and a leakage of memory (i.e. all
	 *	ICMP reply messages get queued up for transmission...)
	 */

	if(raw_sk!=NULL)	/* Shift to last raw user */
	{
		raw_rcv(raw_sk, skb);

	}
	else if (!flag)		/* Free and report errors */
	{
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);	
		kfree_skb(skb);
	}

	return(0);
}
Example #23
/*
 *	Handle ICMP messages in the inside-to-outside direction (outgoing).
 *	Find any that might be relevant, check against existing connections,
 *	forward to the right destination host if relevant.
 *	Currently handles error types - unreachable, quench, ttl exceeded.
 *      (Only used in VS/NAT)
 */
static int ip_vs_out_icmp(struct sk_buff **skb_p)
{
	struct sk_buff	*skb   = *skb_p;
	struct iphdr	*iph;
	struct icmphdr	*icmph;
	struct iphdr	*ciph;	/* The ip header contained within the ICMP */
	__u16		*pptr;	/* port numbers from TCP/UDP contained header */
	unsigned short	ihl;
	unsigned short	len;
	unsigned short	clen, csize;
	struct ip_vs_conn *cp;

	/* reassemble IP fragments, but will it happen in ICMP packets?? */
	if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb, IP_DEFRAG_VS_OUT);
		if (!skb)
			return NF_STOLEN;
		*skb_p = skb;
	}

	if (skb_is_nonlinear(skb)) {
		if (skb_linearize(skb, GFP_ATOMIC) != 0)
			return NF_DROP;
		ip_send_check(skb->nh.iph);
	}

	iph = skb->nh.iph;
	ihl = iph->ihl << 2;
	icmph = (struct icmphdr *)((char *)iph + ihl);
	len   = ntohs(iph->tot_len) - ihl;
	if (len < sizeof(struct icmphdr))
		return NF_DROP;

	IP_VS_DBG(12, "outgoing ICMP (%d,%d) %u.%u.%u.%u->%u.%u.%u.%u\n",
		  icmph->type, ntohs(icmp_id(icmph)),
		  NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));

	/*
	 * Work through seeing if this is for us.
	 * These checks are supposed to be in an order that means easy
	 * things are checked first to speed up processing.... however
	 * this means that some packets will manage to get a long way
	 * down this stack and then be rejected, but that's life.
	 */
	if ((icmph->type != ICMP_DEST_UNREACH) &&
	    (icmph->type != ICMP_SOURCE_QUENCH) &&
	    (icmph->type != ICMP_TIME_EXCEEDED))
		return NF_ACCEPT;

	/* Now find the contained IP header */
	clen = len - sizeof(struct icmphdr);
	if (clen < sizeof(struct iphdr))
		return NF_DROP;
	ciph = (struct iphdr *) (icmph + 1);
	csize = ciph->ihl << 2;
	if (clen < csize)
		return NF_DROP;

	/* We are only interested ICMPs generated from TCP or UDP packets */
	if (ciph->protocol != IPPROTO_UDP && ciph->protocol != IPPROTO_TCP)
		return NF_ACCEPT;

	/* Skip non-first embedded TCP/UDP fragments */
	if (ciph->frag_off & __constant_htons(IP_OFFSET))
		return NF_ACCEPT;

	/* We need at least TCP/UDP ports here */
	if (clen < csize + sizeof(struct udphdr))
		return NF_DROP;

	/*
	 * Find the ports involved - this packet was
	 * incoming so the ports are right way round
	 * (but reversed relative to outer IP header!)
	 */
	pptr = (__u16 *)&(((char *)ciph)[csize]);

	/* Ensure the checksum is correct */
	if (ip_compute_csum((unsigned char *) icmph, len)) {
		/* Failed checksum! */
		IP_VS_DBG(1, "forward ICMP: failed checksum from %d.%d.%d.%d!\n",
			  NIPQUAD(iph->saddr));
		return NF_DROP;
	}

	IP_VS_DBG(11, "Handling outgoing ICMP for "
		  "%u.%u.%u.%u:%d -> %u.%u.%u.%u:%d\n",
		  NIPQUAD(ciph->saddr), ntohs(pptr[0]),
		  NIPQUAD(ciph->daddr), ntohs(pptr[1]));

	/* ciph content is actually <protocol, caddr, cport, daddr, dport> */
	cp = ip_vs_conn_out_get(ciph->protocol, ciph->daddr, pptr[1],
				ciph->saddr, pptr[0]);
	if (!cp)
		return NF_ACCEPT;

	if (IP_VS_FWD_METHOD(cp) != 0) {
		IP_VS_ERR("shouldn't reach here, because the box is on the "
			  "half connection in the tun/dr module.\n");
	}

	/* Now we do real damage to this packet...! */
	/* First change the source IP address, and recalc checksum */
	iph->saddr = cp->vaddr;
	ip_send_check(iph);

	/* Now change the *dest* address in the contained IP */
	ciph->daddr = cp->vaddr;
	ip_send_check(ciph);

	/* the TCP/UDP dest port - cannot redo check */
	pptr[1] = cp->vport;

	/* And finally the ICMP checksum */
	icmph->checksum = 0;
	icmph->checksum = ip_compute_csum((unsigned char *) icmph, len);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* do the statistics and put it back */
	ip_vs_out_stats(cp, skb);
	ip_vs_conn_put(cp);

	IP_VS_DBG(11, "Forwarding correct outgoing ICMP to "
		  "%u.%u.%u.%u:%d -> %u.%u.%u.%u:%d\n",
		  NIPQUAD(ciph->saddr), ntohs(pptr[0]),
		  NIPQUAD(ciph->daddr), ntohs(pptr[1]));

	skb->nfcache |= NFC_IPVS_PROPERTY;

	return NF_ACCEPT;
}
Example #24
/*
 * 	Main IP Receive routine.
 */ 
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->nh.iph;

	/*
	 * 	When the interface is in promisc. mode, drop all the crap
	 * 	that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	ip_statistics.IpInReceives++;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (skb->len < sizeof(struct iphdr))
		goto inhdr_error; 
	if (iph->ihl < 5 || iph->version != 4 || ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto inhdr_error; 

	{
	__u32 len = ntohs(iph->tot_len); 
	if (skb->len < len)
		goto inhdr_error; 

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 *	Note this now means skb->len holds ntohs(iph->tot_len).
	 */

	__skb_trim(skb, len);
	}
	
	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */ 
	if (skb->dst == NULL) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
			goto drop; 
#ifdef CONFIG_CPU_IS_SLOW
		if (net_cpu_congestion > 10 && !(iph->tos&IPTOS_RELIABILITY) &&
		    IPTOS_PREC(iph->tos) < IPTOS_PREC_INTERNETCONTROL) {
			goto drop;
		}
#endif
	}

#ifdef CONFIG_IP_ALWAYS_DEFRAG
	if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		skb = ip_defrag(skb);
		if (!skb)
			return 0;
		iph = skb->nh.iph;
		ip_send_check(iph);
	}
#endif

	if (iph->ihl > 5) {
		struct ip_options *opt;

		/* It looks as overkill, because not all
		   IP options require packet mangling.
		   But it is the easiest for now, especially taking
		   into account that combination of IP options
		   and running sniffer is extremely rare condition.
		                                      --ANK (980813)
		*/
		   
		skb = skb_cow(skb, skb_headroom(skb));
		if (skb == NULL)
			return 0;
		iph = skb->nh.iph;

		skb->ip_summed = 0;
		if (ip_options_compile(NULL, skb))
			goto inhdr_error;

		opt = &(IPCB(skb)->opt);
		if (opt->srr) {
			struct in_device *in_dev = dev->ip_ptr;
			if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
					printk(KERN_INFO "source route option %d.%d.%d.%d -> %d.%d.%d.%d\n",
					       NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
				goto drop;
			}
			if (ip_options_rcv_srr(skb))
				goto drop;
		}
	}

	/*
	 *	See if the firewall wants to dispose of the packet. 
	 *
	 *	Note: the current standard firewall code expects that the 
	 *	destination address was already checked against the interface 
	 *	address lists.
	 *
	 *	If this code is ever moved in front of ip_route_input() you need
	 *	to fix the fw code [moving it might be a good idea anyways,
	 *	so that we can firewall against potentially bugs in the options
	 *	or routing code]
	 */
	
#ifdef	CONFIG_FIREWALL
        {
		int fwres;
		u16 rport;
#ifdef  CONFIG_IP_ROUTE_TOS
		u8  tos = iph->tos;
#endif

		if ((fwres=call_in_firewall(PF_INET, skb->dev, iph, &rport, &skb))<FW_ACCEPT) {
			if (fwres==FW_REJECT)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
			goto drop;
		}

#ifdef	CONFIG_IP_TRANSPARENT_PROXY
		if (fwres==FW_REDIRECT && (IPCB(skb)->redirport = rport) != 0)
			return ip_local_deliver(skb);
#endif
#ifdef	CONFIG_IP_ROUTE_TOS
		/* It is for 2.2 only. Firewalling should make smart
		   rerouting itself, ideally, but now it is too late
		   to teach it. 			--ANK (980905)
		 */
		if (iph->tos != tos && ((struct rtable*)skb->dst)->rt_type == RTN_UNICAST) {
			dst_release(skb->dst);
			skb->dst = NULL;
			if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
				goto drop; 
		}
#endif
	}
#endif

	return skb->dst->input(skb);

inhdr_error:
	ip_statistics.IpInHdrErrors++;
drop:
        kfree_skb(skb);
        return(0);
}
Example #25
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct iphdr *iph = skb->h.iph;
	struct sock *raw_sk=NULL;
	unsigned char hash;
	unsigned char flag = 0;
	struct inet_protocol *ipprot;
	int brd=IS_MYADDR;
	struct options * opt = NULL;
	int is_frag=0;
	__u32 daddr;

#ifdef CONFIG_FIREWALL
	int fwres;
	__u16 rport;
#endif	
#ifdef CONFIG_IP_MROUTE
	int mroute_pkt=0;
#endif	

#ifdef CONFIG_NET_IPV6
	/* 
	 *	Intercept IPv6 frames. We dump ST-II and invalid types just below..
	 */
	 
	if(iph->version == 6)
		return ipv6_rcv(skb,dev,pt);
#endif		

	ip_statistics.IpInReceives++;


	/*
	 *	Tag the ip header of this packet so we can find it
	 */

	skb->ip_hdr = iph;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *	RFC1122: 3.1.2.3 MUST discard a frame with invalid source address [NEEDS FIXING].
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 *	(5.	We ought to check for IP multicast addresses and undefined types.. does this matter ?)
	 */

	if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0
		|| skb->len < ntohs(iph->tot_len))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return(0);
	}

	/*
	 *	Our transport medium may have padded the buffer out. Now we know it
	 *	is IP we can trim to the true length of the frame.
	 *	Note this now means skb->len holds ntohs(iph->tot_len).
	 */

	skb_trim(skb,ntohs(iph->tot_len));
	
	if(skb->len < (iph->ihl<<2))
	{
		ip_statistics.IpInHdrErrors++;
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

	/*
	 *	Account for the packet (even if the packet is
	 *	not accepted by the firewall!). We do this after
	 *	the sanity checks and the additional ihl check
	 *	so we don't account garbage as we might do before.
	 */

#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,NULL,ip_acct_chain,0,IP_FW_MODE_ACCT_IN);
#endif	

	/*
	 *	Try to select closest <src,dst> alias device, if any.
	 *	net_alias_dev_rx32 returns main device if it 
	 *	fails to find another.
 	 *  	If successful, also incr. alias rx count.
	 *
	 *	Only makes sense for unicasts - Thanks ANK.
	 */

#ifdef CONFIG_NET_ALIAS
	if (skb->pkt_type == PACKET_HOST && iph->daddr != skb->dev->pa_addr && net_alias_has(skb->dev)) {
		skb->dev = dev = net_alias_dev_rx32(skb->dev, AF_INET, iph->saddr, iph->daddr);
	}
#endif

	if (iph->ihl > 5) 
	{
		skb->ip_summed = 0;
		if (ip_options_compile(NULL, skb))
			return(0);
		opt = (struct options*)skb->proto_priv;
#ifdef CONFIG_IP_NOSR
		if (opt->srr) 
		{
			kfree_skb(skb, FREE_READ);
			return -EINVAL;
		}
#endif					
	}
	
#if defined(CONFIG_IP_TRANSPARENT_PROXY) && !defined(CONFIG_IP_ALWAYS_DEFRAG)
#define CONFIG_IP_ALWAYS_DEFRAG 1
#endif
#ifdef CONFIG_IP_ALWAYS_DEFRAG
	/*
	 * Defragment all incoming traffic before even looking at it.
	 * If you have forwarding enabled, this makes the system a
	 * defragmenting router.  Not a common thing.
	 * You probably DON'T want to enable this unless you have to.
	 * You NEED to use this if you want to use transparent proxying,
	 * otherwise, we can't vouch for your sanity.
	 */

	/*
	 *	See if the frame is fragmented.
	 */
	 
	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=IPFWD_FRAGMENT;
		/*
		 *	Last fragment ?
		 */
	
		if (iph->frag_off & htons(IP_OFFSET))
			is_frag|=IPFWD_LASTFRAG;
	
		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;
			skb->dev = dev;
			iph=skb->h.iph;
			is_frag = 0;
			/*
			 * When the reassembled packet gets forwarded, the ip
			 * header checksum should be correct.
			 * For better performance, this should actually only
			 * be done in that particular case, i.e. set a flag
			 * here and calculate the checksum in ip_forward.
			 */
			ip_send_check(iph);
		}
	}

#endif
	/*
	 *	See if the firewall wants to dispose of the packet. 
	 */
	
#ifdef	CONFIG_FIREWALL

	if ((fwres=call_in_firewall(PF_INET, skb->dev, iph, &rport))<FW_ACCEPT)
	{
		if(fwres==FW_REJECT)
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
		kfree_skb(skb, FREE_WRITE);
		return 0;	
	}

#ifdef	CONFIG_IP_TRANSPARENT_PROXY
	if (fwres==FW_REDIRECT)
		skb->redirport = rport;
	else
#endif
		skb->redirport = 0;
#endif
	
#ifndef CONFIG_IP_ALWAYS_DEFRAG
	/*
	 *	Remember if the frame is fragmented.
	 */
	 
	if(iph->frag_off)
	{
		if (iph->frag_off & htons(IP_MF))
			is_frag|=IPFWD_FRAGMENT;
		/*
		 *	Last fragment ?
		 */
	
		if (iph->frag_off & htons(IP_OFFSET))
			is_frag|=IPFWD_LASTFRAG;
	}
	
#endif
	/*
	 *	Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
	 *
	 *	This is inefficient. While finding out if it is for us we could also compute
	 *	the routing table entry. This is where the great unified cache theory comes
	 *	in as and when someone implements it
	 *
	 *	For most hosts over 99% of packets match the first conditional
	 *	and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
	 *	function entry.
	 */
	daddr = iph->daddr;
#ifdef CONFIG_IP_TRANSPARENT_PROXY
	/*
	 *	ip_chksock adds still more overhead for forwarded traffic...
	 */
	if ( iph->daddr == skb->dev->pa_addr || skb->redirport || (brd = ip_chk_addr(iph->daddr)) != 0 || ip_chksock(skb))
#else
	if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0)
#endif
	{
		if (opt && opt->srr) 
	        {
			int srrspace, srrptr;
			__u32 nexthop;
			unsigned char * optptr = ((unsigned char *)iph) + opt->srr;

			if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) 
			{
				kfree_skb(skb, FREE_WRITE);
				return 0;
			}

			for ( srrptr=optptr[2], srrspace = optptr[1];
			      srrptr <= srrspace;
			      srrptr += 4
			     ) 
			{
				int brd2;
				if (srrptr + 3 > srrspace) 
				{
					icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2,
						  skb->dev);
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				memcpy(&nexthop, &optptr[srrptr-1], 4);
				if ((brd2 = ip_chk_addr(nexthop)) == 0)
					break;
				if (brd2 != IS_MYADDR) 
				{

					/*
					 *	ANK: should we implement weak tunneling of multicasts?
					 *	Are they obsolete? DVMRP specs (RFC-1075) is old enough...
					 *	[They are obsolete]
					 */
					kfree_skb(skb, FREE_WRITE);
					return -EINVAL;
				}
				memcpy(&daddr, &optptr[srrptr-1], 4);
			}
			if (srrptr <= srrspace) 
			{
				opt->srr_is_hit = 1;
				opt->is_changed = 1;
				if (sysctl_ip_forward) {
					if (ip_forward(skb, dev, is_frag, nexthop))
						kfree_skb(skb, FREE_WRITE);
				} else {
					ip_statistics.IpInAddrErrors++;
					kfree_skb(skb, FREE_WRITE);
				}
				return 0;
			}
		}

#ifdef CONFIG_IP_MULTICAST	
		if(!(dev->flags&IFF_ALLMULTI) && brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
		{
			/*
			 *	Check it is for one of our groups
			 */
			struct ip_mc_list *ip_mc=dev->ip_mc_list;
			do
			{
				if(ip_mc==NULL)
				{	
					kfree_skb(skb, FREE_WRITE);
					return 0;
				}
				if(ip_mc->multiaddr==iph->daddr)
					break;
				ip_mc=ip_mc->next;
			}
			while(1);
		}
#endif

#ifndef CONFIG_IP_ALWAYS_DEFRAG
		/*
		 *	Reassemble IP fragments.
		 */

		if(is_frag)
		{
			/* Defragment. Obtain the complete packet if there is one */
			skb=ip_defrag(iph,skb,dev);
			if(skb==NULL)
				return 0;
			skb->dev = dev;
			iph=skb->h.iph;
		}

#endif

#ifdef CONFIG_IP_MASQUERADE
		/*
		 * Do we need to de-masquerade this packet?
		 */
		{
			int ret = ip_fw_demasquerade(&skb,dev);
			if (ret < 0) {
				kfree_skb(skb, FREE_WRITE);
				return 0;
			}

			if (ret)
			{
				struct iphdr *iph=skb->h.iph;
				if (ip_forward(skb, dev, IPFWD_MASQUERADED, iph->daddr))
					kfree_skb(skb, FREE_WRITE);
				return 0;
			}
		}
#endif

		/*
		 *	Point into the IP datagram, just past the header.
		 */

		skb->ip_hdr = iph;
		skb->h.raw += iph->ihl*4;

#ifdef CONFIG_IP_MROUTE		
		/*
		 *	Check the state on multicast routing (multicast and not 224.0.0.z)
		 */
		 
		if(brd==IS_MULTICAST && (iph->daddr&htonl(0xFFFFFF00))!=htonl(0xE0000000))
			mroute_pkt=1;

#endif
		/*
		 *	Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
		 *
		 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
		 */
 
		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
		hash = iph->protocol & (MAX_INET_PROTOS - 1);

		/* 
		 *	If there maybe a raw socket we must check - if not we don't care less 
		 */
		 
		if((raw_sk = raw_v4_htable[hash]) != NULL) {
			struct sock *sknext = NULL;
			struct sk_buff *skb1;

			raw_sk = raw_v4_lookup(raw_sk, iph->protocol,
					       iph->saddr, iph->daddr);
			if(raw_sk) {	/* Any raw sockets */
				do {
					/* Find the next */
					sknext = raw_v4_lookup(raw_sk->next,
							       iph->protocol,
							       iph->saddr,
							       iph->daddr);
					if(sknext)
						skb1 = skb_clone(skb, GFP_ATOMIC);
					else
						break;	/* One pending raw socket left */
					if(skb1)
						raw_rcv(raw_sk, skb1, dev, iph->saddr,daddr);
					raw_sk = sknext;
				} while(raw_sk!=NULL);
				
				/*
				 *	Here either raw_sk is the last raw socket, or NULL if none 
				 */
				 
				/*
				 *	We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy 
				 */
			}
		}
	
		/*
		 *	skb->h.raw now points at the protocol beyond the IP header.
		 */
	
		for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
		{
			struct sk_buff *skb2;
	
			if (ipprot->protocol != iph->protocol)
				continue;
		       /*
			* 	See if we need to make a copy of it.  This will
			* 	only be set if more than one protocol wants it.
			* 	and then not for the last one. If there is a pending
			*	raw delivery wait for that
			*/
	
#ifdef CONFIG_IP_MROUTE
			if (ipprot->copy || raw_sk || mroute_pkt)
#else	
			if (ipprot->copy || raw_sk)
#endif			
			{
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if(skb2==NULL)
					continue;
			}
			else
			{
				skb2 = skb;
			}
			flag = 1;

		       /*
			*	Pass on the datagram to each protocol that wants it,
			*	based on the datagram protocol.  We should really
			*	check the protocol handler's return values here...
			*/

			ipprot->handler(skb2, dev, opt, daddr,
				(ntohs(iph->tot_len) - (iph->ihl * 4)),
				iph->saddr, 0, ipprot);
		}

		/*
		 *	All protocols checked.
		 *	If this packet was a broadcast, we may *not* reply to it, since that
		 *	causes (proven, grin) ARP storms and a leakage of memory (i.e. all
		 *	ICMP reply messages get queued up for transmission...)
		 */

#ifdef CONFIG_IP_MROUTE		 
		/*
		 *	Forward the last copy to the multicast router. If
		 *	there is a pending raw delivery however make a copy
		 *	and forward that.
		 */
		 
		if(mroute_pkt)
		{
			flag=1;
			if(raw_sk==NULL)
				ipmr_forward(skb, is_frag);
			else
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
				{
					skb2->free=1;
					ipmr_forward(skb2, is_frag);
				}
			}
		}
#endif		

		if(raw_sk!=NULL)	/* Shift to last raw user */
			raw_rcv(raw_sk, skb, dev, iph->saddr, daddr);
		else if (!flag)		/* Free and report errors */
		{
			if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);	
			kfree_skb(skb, FREE_WRITE);
		}

		return(0);
	}

	/*
	 *	Do any unicast IP forwarding required.
	 */
	
	/*
	 *	Don't forward multicast or broadcast frames.
	 */

	if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
	{
		kfree_skb(skb,FREE_WRITE);
		return 0;
	}

	/*
	 *	The packet is for another target. Forward the frame
	 */

	if (sysctl_ip_forward) {
		if (opt && opt->is_strictroute) 
		{
			icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev);
			kfree_skb(skb, FREE_WRITE);
			return -1;
		}
		if (ip_forward(skb, dev, is_frag, iph->daddr))
			kfree_skb(skb, FREE_WRITE);
	} else {
/*	printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
			iph->saddr,iph->daddr);*/
		ip_statistics.IpInAddrErrors++;
		kfree_skb(skb, FREE_WRITE);
	}
	return(0);
}
Example #26
// Takes an IP header and a pointer that will receive the reassembled IP.
// Returns a signal saying whether a new datagram is complete, whether this
// is still a fragment, or whether the notify callback should run directly.
// The defrag out-parameter receives the reassembled IP datagram.
int
ip_defrag_stub(struct ip *iph, struct ip **defrag)
{
	int offset, flags, tot_len;
	struct sk_buff *skb;

	// Bump the packet counter
	numpack++;
	// Reset the cached time
	timenow = 0;
	// Check whether the first timer has expired; only the list head
	// needs checking, because new timers are always appended at the
	// tail, so the ones in front always expire first
	while (timer_head && timer_head->expires < jiffies())
	{
		// Load the host owning the expired timer into the this_host global
		this_host = ((struct ipq *) (timer_head->data))->hf;
		// Run the timer callback (ip_expire): its argument is the
		// expired IP queue, which the callback then removes
		timer_head->function(timer_head->data);
	}

	// Fetch the 16-bit fragment field
	offset = ntohs(iph->ip_off);
	// The high 3 bits are the fragment flags
	flags = offset & ~IP_OFFSET;
	// The low 13 bits are this fragment's offset, in 8-byte units
	offset &= IP_OFFSET;

	// No more-fragments flag and offset zero: the datagram is unfragmented
	if (((flags & IP_MF) == 0) && (offset == 0))
	{
		// No caching needed
		ip_defrag(iph, 0);
		// Tell the caller to invoke the notify callback directly
		return IPF_NOTF;
	}

	// Otherwise this is a genuine fragment; keep going

	// Total length of the IP datagram just received
	tot_len = ntohs(iph->ip_len);
	// Allocate one block holding the fragment plus an sk_buff; the
	// trailing sk_buff space becomes the second argument of ip_defrag
	skb = (struct sk_buff *) malloc(tot_len + sizeof(struct sk_buff));
	if (!skb)
		nids_params.no_mem("ip_defrag_stub");
	// Point skb->data just past the sk_buff header, i.e. at the extra
	// space allocated right behind it -- which is why an extra
	// sizeof(struct sk_buff) was requested above
	skb->data = (char *) (skb + 1);
	// Copy the whole IP fragment in
	memcpy(skb->data, iph, tot_len);
	// Total length + 16 + per-device add-on
	skb->truesize = tot_len + 16 + nids_params.dev_addon;
	// Round up to a multiple of 16
	skb->truesize = (skb->truesize + 15) & ~15;
	// Plus the sk_buff bookkeeping size, 168 by default
	skb->truesize += nids_params.sk_buff_size;

	// The two ip_defrag arguments are really two adjacent parts of the
	// same allocation: skb->data points right behind skb. On success it
	// returns the fully reassembled datagram
	if ((*defrag = (struct ip *)ip_defrag((struct ip *) (skb->data), skb)))
		// A complete datagram was assembled
		return IPF_NEW;

	// Still a fragment (queued), or an error
	return IPF_ISF;
}
Example #27
/*
 * 	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
    s32 port = 0;
    u32 ret = 0;
    u32 dataoff;
    struct udphdr bfdhdr;
    struct udphdr *bfd_hdr;
#ifdef CONFIG_NLM_COMMON
    struct nf_conn *ct = (struct nf_conn *)skb->nfct;
    
	/*
	 *	Reassemble IP fragments.
	 */
    if(smp_processor_id() >= CONPLAT_DATA_CPU_FIRST)
	{
		  /* IPsec, SSL VPN, GRE and L2TP data packets keep being handled
		   * on the current vcpu instead of being steered to the control
		   * core; all other packets are handed off to the control core. */
		if(!(ipsec_all_packet(skb) || ssl_ip_data_packet(skb) || sslvpn_data_packet(skb) ||
             gre_data_packet(skb) || l2tp_data_packet(skb)))
		{
            if ( ct )
            {
                if ( test_bit(IPS_CONFIRMED_BIT, &ct->status) )
				{
                    TSESSION_STAT_INC(tsession_confirmed);
                }
            }
            ip_local_transtocontrol(skb);
            return 0;
        }
    }

    if(NULL != local_srvlog_send && ct && IPPROTO_ICMP == skb->nh.iph->protocol)
    {
        if (!test_bit(IPS_CONFIRMED_BIT, &ct->status))
		{
            local_srvlog_send(skb);
        }
    }
#endif

	SNIFFER_PACKET(SNIFFER_MODULE_IP, skb, in, NF_ACCEPT);

	if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET))
	{
		skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
		if (!skb)
			return 0;
	}
    /*for bfd*/
    if(NULL != bfd_local_in)
    {
        dataoff = (u32)(((skb)->nh.raw - (skb)->data) + (skb)->nh.iph->ihl*4);
        bfd_hdr = skb_header_pointer(skb, (s32)dataoff, sizeof(bfdhdr), &bfdhdr);
        if((bfd_hdr) && ((DP_BFD_PORT == bfd_hdr->dest) || (DP_BFD_ECHO_PORT == bfd_hdr->dest)))
        {
            ret = bfd_local_in(skb);
            if(NF_DROP == ret)
            {
                if(NULL != skb)
                {
                    kfree_skb(skb);
                }             
                return 0;
            }
        }
    }
    
    if(NULL != port_services_limit)
    {
        if(IPPROTO_ICMP == skb->nh.iph->protocol)
        {
 		   struct icmphdr *hdr;
		
		   hdr = (struct icmphdr *)((u32 *)(skb->nh.iph) + (skb->nh.iph)->ihl);
		   if((ICMP_ECHO == hdr->type) && (0 == hdr->code))
		   {
				port_services_limit(skb->dev->ifindex, ICMP_ECHO, skb->nh.iph->daddr, &ret);
            
            	if(1 == ret)
            	{
                	if(NULL != skb)
                	{
                    	kfree_skb(skb);
                	}             
                	return 0;
            	}
		   	}
        }
        else
        {
            if(IPPROTO_TCP == skb->nh.iph->protocol)
            {
                port = ntohs(*(u16*)(skb->nh.raw + (skb->nh.iph->ihl*4)+ sizeof(u16)));
                port_services_limit(skb->dev->ifindex, port, skb->nh.iph->daddr, &ret);
                
                if(1 == ret)
                {
                    if(NULL != skb)
                    {
                        kfree_skb(skb);
                    }
                    
                    return 0;
                } 
            }           
        }
        
    }

	return NS_HOOK(NS_IP_LOCAL_IN, PF_INET, skb, skb->dev, NULL, ip_local_deliver_finish);
}