Example #1
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

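	/* Refragment conntrack'd IPv4 packets that, counting the bridge
	 * netfilter encapsulation overhead, no longer fit the outgoing
	 * device MTU; GSO packets are left for the stack to segment later.
	 */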
	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
Example #2
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

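	/* Refragment non-GSO IPv4 packets that exceed the device MTU once
	 * the bridge netfilter encapsulation overhead is counted; a packet
	 * with invalid IP options is freed below rather than returned as
	 * NF_DROP.
	 */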
	if (!skb_is_gso(skb) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			goto drop;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
drop:
	kfree_skb(skb);
	return 0;
}
Example #3
int FASTPATHNET bcm_fast_path(struct sk_buff *skb)
{
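	/* No route attached yet: do an input route lookup based on the IP
	 * header and retarget the skb at the chosen output device.
	 */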
	if (skb->dst == NULL) {
		struct iphdr *iph = ip_hdr(skb);
		struct net_device *dev = skb->dev;

		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
			kfree_skb(skb);
			return -EINVAL;
		}

		/*  Change skb owner to output device */
		skb->dev = skb->dst->dev;
	}

	if (unlikely(skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb)))
		return ip_fragment(skb, bcm_fast_path_output);

	return bcm_fast_path_output(skb);
}
Example #4
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;
	int frag_max_size;

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
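		/* br_parse_ip_options() scrubs the IP control block, so the
		 * frag_max_size recorded by the bridge is restored here and
		 * ip_fragment() will not emit fragments larger than the ones
		 * that originally arrived.
		 */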
		IPCB(skb)->frag_max_size = frag_max_size;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
Example #5
static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	int frag_max_size;
	unsigned int mtu_reserved;

	if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
		return br_dev_queue_push_xmit(sk, skb);

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (skb->len + mtu_reserved > skb->dev->mtu) {
		struct brnf_frag_data *data;

		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		IPCB(skb)->frag_max_size = frag_max_size;

		nf_bridge_update_protocol(skb);

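		/* Stash the Ethernet and encapsulation header per-CPU so
		 * br_nf_push_frag_xmit() can put it back in front of every
		 * fragment that ip_fragment() emits.
		 */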
		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
	} else {
		ret = br_dev_queue_push_xmit(sk, skb);
	}

	return ret;
}
Example #6
static unsigned int ip_refrag(unsigned int hooknum,
			      struct sk_buff **pskb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = (struct rtable *)(*pskb)->dst;

	/* We've seen it coming out the other side: confirm */
	if (ip_confirm(hooknum, pskb, in, out, okfn) != NF_ACCEPT)
		return NF_DROP;

	/* Local packets are never produced too large for their
	   interface.  We defragment them at LOCAL_OUT, however,
	   so we have to refragment them here. */
	if ((*pskb)->len > dst_pmtu(&rt->u.dst) &&
	    !skb_shinfo(*pskb)->tso_size) {
		/* No hook can be after us, so this should be OK. */
		ip_fragment(*pskb, okfn);
		return NF_STOLEN;
	}
	return NF_ACCEPT;
}
Example #7
static inline int bcm_fast_path(struct sk_buff *skb)
{
	if (skb_dst(skb) == NULL) {
		struct iphdr *iph = ip_hdr(skb);
		struct net_device *dev = skb->dev;

		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
			return NF_DROP;
		}
		/*  Change skb owner to output device */
		skb->dev = skb_dst(skb)->dev;
	}

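	/* With a valid route, fragment oversized non-GSO packets toward the
	 * output path; without one the skb cannot be sent and is freed.
	 */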
	if (skb_dst(skb)) {
		if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
			return ip_fragment(skb, bcm_fast_path_output);
		else
			return bcm_fast_path_output(skb);
	}

	kfree_skb(skb);
	return -EINVAL;
}
Example #8
void ip_queue_xmit(struct sock *sk, struct device *dev,
	      struct sk_buff *skb, int free)
{
	unsigned int tot_len;
	struct iphdr *iph;

	IS_SKB(skb);

	/*
	 *	Do some book-keeping in the packet for later
	 */

	skb->sk = sk;
	skb->dev = dev;
	skb->when = jiffies;

	/*
	 *	Find the IP header and set the length. This is bad
	 *	but once we get the skb data handling code in the
	 *	hardware will push its header sensibly and we will
	 *	set skb->ip_hdr to avoid this mess and the fixed
	 *	header length problem
	 */

	iph = skb->ip_hdr;
	tot_len = skb->len - (((unsigned char *)iph) - skb->data);
	iph->tot_len = htons(tot_len);

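	/* free == 0 keeps the skb on the socket send queue for possible
	 * retransmission before falling through to take a fresh IP id;
	 * any other value just marks the skb to be freed after transmit,
	 * and only free == 1 takes a new id (fragments keep theirs).
	 */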
	switch (free) {
		/* No reassigning numbers to fragments... */
		default:
			free = 1;
			break;
		case 0:
			add_to_send_queue(sk, skb);
			/* fall through */
		case 1:
			iph->id = htons(ip_id_count++);
	}

	skb->free = free;

	/* Sanity check */
	if (dev == NULL)
		goto no_device;

#ifdef CONFIG_FIREWALL
	if (call_out_firewall(PF_INET, skb->dev, iph, NULL) < FW_ACCEPT)
		goto out;
#endif	

	/*
	 *	Do we need to fragment. Again this is inefficient.
	 *	We need to somehow lock the original buffer and use
	 *	bits of it.
	 */

	if (tot_len > dev->mtu)
		goto fragment;

	/*
	 *	Add an IP checksum
	 */

	ip_send_check(iph);

	/*
	 *	More debugging. You cannot queue a packet already on a list
	 *	Spot this and moan loudly.
	 */
	if (skb->next != NULL)
	{
		NETDEBUG(printk("ip_queue_xmit: next != NULL\n"));
		skb_unlink(skb);
	}

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	 
	ip_statistics.IpOutRequests++;
#ifdef CONFIG_IP_ACCT
	ip_fw_chk(iph,dev,NULL,ip_acct_chain,0,IP_FW_MODE_ACCT_OUT);
#endif	

#ifdef CONFIG_IP_MULTICAST	

	/*
	 *	Multicasts are looped back for other local users
	 */
	 
	if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
	{
		if(sk==NULL || sk->ip_mc_loop)
		{
			if(iph->daddr==IGMP_ALL_HOSTS || (dev->flags&IFF_ALLMULTI))
			{
				ip_loopback(dev,skb);
			}
			else
			{
				struct ip_mc_list *imc=dev->ip_mc_list;
				while(imc!=NULL)
				{
					if(imc->multiaddr==iph->daddr)
					{
						ip_loopback(dev,skb);
						break;
					}
					imc=imc->next;
				}
			}
		}
		/* Multicasts with ttl 0 must not go beyond the host */
		
		if (iph->ttl==0)
			goto out;
	}
#endif
	if ((dev->flags & IFF_BROADCAST) && !(dev->flags & IFF_LOOPBACK)
	    && (iph->daddr==dev->pa_brdaddr || iph->daddr==0xFFFFFFFF))
		ip_loopback(dev,skb);
		
	if (dev->flags & IFF_UP)
	{
		/*
		 *	If we have an owner use its priority setting,
		 *	otherwise use NORMAL
		 */
		int priority = SOPRI_NORMAL;
		if (sk)
			priority = sk->priority;

		dev_queue_xmit(skb, dev, priority);
		return;
	}
	if(sk)
		sk->err = ENETDOWN;
	ip_statistics.IpOutDiscards++;
out:
	if (free)
		kfree_skb(skb, FREE_WRITE);
	return;

no_device:
	NETDEBUG(printk("IP: ip_queue_xmit dev = NULL\n"));
	goto out;

fragment:
	ip_fragment(sk,skb,dev,0);
	goto out;
}