Example #1
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, int hdr_len, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	secpath_reset(skb);

	skb->protocol = tpi->proto;

	skb->mac_header = skb->network_header;
	__pskb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		iph = ip_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	skb->pkt_type = PACKET_HOST;
	__skb_tunnel_rx(skb, tunnel->dev);

	skb_reset_network_header(skb);
	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
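A note on the checksum test near the top of ip_tunnel_rcv(): the two-clause condition on TUNNEL_CSUM (the packet carries a checksum but the tunnel does not expect one, or vice versa) is just an exclusive-or on a single flag bit. A minimal standalone sketch, using a stand-in constant for the kernel's TUNNEL_CSUM, confirming the two forms agree:

#include <assert.h>
#include <stdint.h>

#define TUNNEL_CSUM 0x01	/* stand-in for the kernel's flag bit */

int main(void)
{
	uint16_t a, b;

	for (a = 0; a < 4; a++) {
		for (b = 0; b < 4; b++) {
			/* the two-clause form used in ip_tunnel_rcv() */
			int two_clause = (!(a & TUNNEL_CSUM) && (b & TUNNEL_CSUM)) ||
					 ((a & TUNNEL_CSUM) && !(b & TUNNEL_CSUM));
			/* the equivalent single XOR on the flag bit */
			int xor_form = !!((a ^ b) & TUNNEL_CSUM);

			assert(two_clause == xor_form);
		}
	}
	return 0;
}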
Example #2
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
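The slow path above sizes each fragment as len = min(left, mtu) and then rounds every non-final fragment down to a multiple of 8 (len &= ~7), because the IPv6 fragment offset field counts 8-octet units. A userspace sketch of that arithmetic; the MTU and payload numbers are illustrative, not taken from the function:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500 - 40 - 8;	/* link MTU minus IPv6 and fragment headers */
	unsigned int left = 3000;		/* payload bytes still to send */
	unsigned int offset = 0;

	while (left > 0) {
		unsigned int len = left > mtu ? mtu : left;

		if (len < left)			/* non-final fragment: 8-byte align */
			len &= ~7u;
		printf("fragment: offset=%u len=%u MF=%d\n",
		       offset, len, left - len > 0);
		left -= len;
		offset += len;
	}
	return 0;
}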
Example #3
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
			struct rtable *rt,
			unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
			       rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->dst = dst_clone(&rt->u.dst);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	/* We don't modify invalid header */
	iphlen = iph->ihl * 4;
	if (iphlen >= sizeof(*iph) && iphlen <= length) {
		if (!iph->saddr)
			iph->saddr = rt->rt_src;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(iph, &rt->u.dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(((struct icmphdr *)
			skb_transport_header(skb))->type);

	err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = inet->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #4
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
			struct rtable **rtp,
			unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;
	struct rtable *rt = *rtp;

	if (length > rt->dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*rtp = NULL;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = -EFAULT;
	if (memcpy_fromiovecend((void *)iph, from, 0, length))
		goto error_free;

	iphlen = iph->ihl * 4;

	/*
	 * We don't want to modify the ip header, but we do need to
	 * be sure that it won't cause problems later along the network
	 * stack.  Specifically we want to make sure that iph->ihl is a
	 * sane value.  If ihl points beyond the length of the buffer passed
	 * in, reject the frame as invalid
	 */
	err = -EINVAL;
	if (iphlen > length)
		goto error_free;

	if (iphlen >= sizeof(*iph)) {
		if (!iph->saddr)
			iph->saddr = rt->rt_src;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(iph, &rt->dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_free:
	kfree_skb(skb);
error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !inet->recverr)
		err = 0;
	return err;
}
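For context, both versions of raw_send_hdrinc() above are reached from userspace through a raw socket with IP_HDRINCL set, which is what makes the caller responsible for supplying the IP header that the function then sanity-checks. A minimal userspace sketch (error handling trimmed; building the actual header and payload is left as a comment):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);	/* needs CAP_NET_RAW */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* We will supply the IP header ourselves; sends now go through
	 * raw_send_hdrinc(). */
	if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one)) < 0) {
		perror("setsockopt");
		return 1;
	}
	/* ... fill a buffer with a struct iphdr plus payload and sendto() it;
	 * as the function shows, saddr, id and check may be left zero and the
	 * kernel fills them in. */
	close(fd);
	return 0;
}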
Example #5
/*
 *	Check if this packet is complete.
 *	Returns 1 when the frame has been successfully reassembled,
 *	and -1 on failure for any reason.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
Example #6
static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
			      int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = net->ipv6.mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (net->ipv6.mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
Example #7
/*
 * Netfilter hook function for incoming packets.
 */
unsigned int in_hook_func(unsigned int hooknum, struct sk_buff *skb,
		const struct net_device *in, const struct net_device *out,
		int (*okfn)(struct sk_buff *))
{
	int i;

	if (!skb)
		return NF_ACCEPT;
	sock_buff = skb;

	/* Extract the headers via the skb accessors */
	ip_header = (struct iphdr *)skb_network_header(sock_buff);
	udp_header = (struct udphdr *)skb_transport_header(sock_buff);
	tcp_header = (struct tcphdr *)skb_transport_header(sock_buff);

	/* Iterate through the T_RULES array */
	for (i = 0; i < ruleCount; i++) {
		/* Check if the rule is for incoming packets */
		if (T_RULES[i].pkt == 1) {
			/* Check if the rule has protocol field set to ALL */
			if (T_RULES[i].proto == 0) {
				/* Check IP address */
				if (compare_ip((unsigned int)ip_header->saddr, T_RULES[i].srcip)) {
					/* Check destination port number */
					if (compare_port(tcp_header->dest, T_RULES[i].dstpt)) {
						/* Check whether to BLOCK the packet */
						if (T_RULES[i].block == 1) {
							printk(KERN_INFO"firewall: Blocking incoming pkts\n");
							return NF_DROP;
						} else {
							if(T_RULES[i].block == 0) {
								printk(KERN_INFO"firewall: Unblocking incoming pkts");
								return NF_ACCEPT;
							}
						}
					}	
				}
			} 
			/* Check if the rule has protocol field set to TCP */
			if((T_RULES[i].proto == 1) && (ip_header->protocol == 6)){
				/* Check IP address */
				if(compare_ip((unsigned int)ip_header->saddr, T_RULES[i].srcip)) {
					/* Check destination port number */
					if(compare_port(tcp_header->dest, T_RULES[i].dstpt)) {
						/* Check whether to BLOCK the packet */
						if(T_RULES[i].block == 1) {							
							printk(KERN_INFO"firewall: Blocking Incoming TCP pkts\n");
							return NF_DROP;
						} else {
							if(T_RULES[i].block == 0) {
								printk(KERN_INFO"firewall: Unblocking Incoming TCP pkts");
								return NF_ACCEPT;
							}
						}
					}
				}
			}
			/* Check if the rule has protocol field set to UDP */
			if((T_RULES[i].proto == 2) && (ip_header->protocol == 17)) {
				/* Check IP address */
				if(compare_ip((unsigned int)ip_header->saddr, T_RULES[i].srcip)) {
					/* Check destination port number */
					if(compare_port(tcp_header->dest, T_RULES[i].dstpt)) {
						/* Check whether to BLOCK the packet */
						if(T_RULES[i].block == 1) {
							printk(KERN_INFO"firewall: Blocking Incoming UDP pkts\n");
							return NF_DROP;
						} else {
							if(T_RULES[i].block == 0) {
								printk(KERN_INFO"firewall: Unblocking all Incoming UDP pkts");
								return NF_ACCEPT;
							}
						}		
					}
				} 	
			}	
		}

	}
	return NF_ACCEPT;
}
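The five-argument signature used here is the pre-4.x netfilter hook API; a hook like this is attached with an nf_hook_ops structure. A registration sketch under that assumption (module boilerplate only; the names fw_init/fw_exit/in_nfho are hypothetical):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static struct nf_hook_ops in_nfho = {
	.hook     = in_hook_func,		/* the function above */
	.hooknum  = NF_INET_PRE_ROUTING,	/* inspect packets as they arrive */
	.pf       = PF_INET,
	.priority = NF_IP_PRI_FIRST,
};

static int __init fw_init(void)
{
	return nf_register_hook(&in_nfho);	/* old single-hook API */
}

static void __exit fw_exit(void)
{
	nf_unregister_hook(&in_nfho);
}

module_init(fw_init);
module_exit(fw_exit);
MODULE_LICENSE("GPL");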
Example #8
enum ipsec_rcv_value
ipsec_rcv_ipcomp_decomp(struct ipsec_rcv_state *irs)
{
	unsigned int flags = 0;
	struct ipsec_sa *ipsp = irs->ipsp;
	struct sk_buff *skb;

	skb = irs->skb;

	ipsec_xmit_dmp("ipcomp", skb_transport_header(skb), skb->len);

	if (ipsp == NULL)
		return IPSEC_RCV_SAIDNOTFOUND;

	if (sysctl_ipsec_inbound_policy_check &&
	    ((ntohl(ipsp->ips_said.spi) & 0x0000ffff) !=
	     (ntohl(irs->said.spi) & 0x0000ffff)) &&
	    /* workaround for peer non-compliance with RFC 2393 */
	    (ipsp->ips_encalg != ntohl(irs->said.spi))) {
		char sa2[SATOT_BUF];
		size_t sa_len2 = 0;

		sa_len2 = KLIPS_SATOT(debug_rcv, &ipsp->ips_said, 0, sa2, sizeof(sa2));

		KLIPS_PRINT(debug_rcv,
			    "klips_debug:ipsec_rcv_ipcomp_decomp: "
			    "Incoming packet with SA(IPCA):%s does not match policy SA(IPCA):%s cpi=%04x cpi->spi=%08x spi=%08x, spi->cpi=%04x for SA grouping, dropped.\n",
			    irs->sa_len ? irs->sa : " (error)",
			    sa_len2 ? sa2 : " (error)",
			    ntohs(irs->protostuff.ipcompstuff.compp->ipcomp_cpi),
			    (__u32)ntohl(irs->said.spi),
			    (__u32)ntohl((ipsp->ips_said.spi)),
			    (__u16)(ntohl(ipsp->ips_said.spi) & 0x0000ffff));
		if(irs->stats) {
			irs->stats->rx_dropped++;
		}
		return IPSEC_RCV_SAIDNOTFOUND;
	}

	if (osw_ip_hdr_version(irs) == 6)
		ipsp->ips_comp_ratio_cbytes += ntohs(osw_ip6_hdr(irs)->payload_len)
				+ sizeof(struct ipv6hdr);
	else
		ipsp->ips_comp_ratio_cbytes += ntohs(osw_ip4_hdr(irs)->tot_len);
	irs->next_header = irs->protostuff.ipcompstuff.compp->ipcomp_nh;

#ifdef CONFIG_KLIPS_OCF
	if (irs->ipsp->ocf_in_use)
		return(ipsec_ocf_rcv(irs));
#endif

	skb = skb_decompress(skb, ipsp, &flags);
	if (!skb || flags) {
		KLIPS_PRINT(debug_rcv,
			    "klips_debug:ipsec_rcv_ipcomp_decomp: "
			    "skb_decompress() returned error flags=%x, dropped.\n",
			    flags);
		if (irs->stats) {
			if (flags)
				irs->stats->rx_errors++;
			else
				irs->stats->rx_dropped++;
		}
		return IPSEC_RCV_IPCOMPFAILED;
	}

	/* make sure we update the pointer */
	irs->skb = skb;

	irs->iph = (void *) ip_hdr(skb);

	if (osw_ip_hdr_version(irs) == 6)
		ipsp->ips_comp_ratio_dbytes += ntohs(osw_ip6_hdr(irs)->payload_len)
				+ sizeof(struct ipv6hdr);
	else
		ipsp->ips_comp_ratio_dbytes += ntohs(osw_ip4_hdr(irs)->tot_len);

	KLIPS_PRINT(debug_rcv,
		    "klips_debug:ipsec_rcv_ipcomp_decomp: "
		    "packet decompressed SA(IPCA):%s cpi->spi=%08x spi=%08x, spi->cpi=%04x, nh=%d.\n",
		    irs->sa_len ? irs->sa : " (error)",
		    (__u32)ntohl(irs->said.spi),
		    ipsp != NULL ? (__u32)ntohl((ipsp->ips_said.spi)) : 0,
		    ipsp != NULL ? (__u16)(ntohl(ipsp->ips_said.spi) & 0x0000ffff) : 0,
		    irs->next_header);
	KLIPS_IP_PRINT(debug_rcv & DB_RX_PKTRX, irs->iph);

	return IPSEC_RCV_OK;
}
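The policy check above leans on the IPComp convention that the 16-bit CPI is carried in the low 16 bits of the 32-bit SPI, which is why both sides of the comparison are masked with 0x0000ffff. A tiny sketch of that mapping (the SPI value is made up):

#include <assert.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t spi_net = htonl(0x00004001);	/* SA's SPI in network order */
	uint16_t cpi = (uint16_t)(ntohl(spi_net) & 0x0000ffff);

	assert(cpi == 0x4001);			/* CPI == low 16 bits of the SPI */
	return 0;
}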
Example #9
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports at most 9 segments, so the headers will fit in one page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use the TSO core;
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will also copy the SNAP, which will be treated
		 * as part of the MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
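Each A-MSDU subframe (DA, SA, length, then SNAP/IP/TCP headers and data) has to start on a 4-byte boundary, which is what the amsdu_pad = (4 - ...) & 0x3 computation between subframes enforces. A standalone restatement of that rule with illustrative header sizes:

#include <stdio.h>

int main(void)
{
	unsigned int eth_hdr = 14, snap_ip_tcp = 8 + 20 + 20;	/* illustrative */
	unsigned int data_lens[] = { 1000, 1001, 1002, 1003 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int sub = eth_hdr + snap_ip_tcp + data_lens[i];
		/* same form as the driver: pad to the next 4-byte boundary */
		unsigned int pad = (4 - sub) & 0x3;

		printf("subframe %u bytes -> pad %u (next starts at %u)\n",
		       sub, pad, sub + pad);
	}
	return 0;
}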
Example #10
enum ipsec_rcv_value ipsec_ocf_rcv(struct ipsec_rcv_state *irs)
{
	struct cryptop *crp;
	struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
	struct ipsec_sa *ipsp;
	int req_count = 0;
	int rc, err;

	KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv\n");

	ipsp = irs->ipsp;
	if (!ipsp) {
		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
			    "no SA for rcv processing\n");
		return IPSEC_RCV_SAIDNOTFOUND;
	}

	if (!irs->skb) {
		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: no skb\n");
		return IPSEC_RCV_SAIDNOTFOUND;
	}

	switch (ipsp->ips_said.proto) {
	case IPPROTO_COMP:
		rc = ipsec_ocf_ipcomp_copy_expand(irs);
		if (rc != IPSEC_RCV_OK) {
			KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
				    "growing skb for ipcomp failed, rc=%d\n",
				    rc);
			return rc;
		}
		break;
	case IPPROTO_ESP:
	case IPPROTO_AH:
		break;
	default:
		KLIPS_PRINT(debug_rcv & DB_RX_XF, "klips_debug:ipsec_ocf_rcv: "
			    "bad protocol %d\n", ipsp->ips_said.proto);
		return IPSEC_RCV_BADPROTO;
	}

	req_count = (ipsp->ips_authalg ? 1 : 0) +
		    (ipsp->ips_encalg  ? 1 : 0);
	crp = crypto_getreq(req_count);

	if (!crp) {
		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
			    "crypto_getreq returned NULL\n");
		return IPSEC_RCV_REALLYBAD;
	}

	/* we currently don't support any chaining across protocols */
	switch (ipsp->ips_said.proto) {
	case IPPROTO_ESP:
		/*
		 * We are decrypting; given the setup in ipsec_ocf_sa_init above,
		 * we need to flip the order of hash/cipher on receive so that the
		 * hash runs first, then the decrypt. Transmit is fine as is.
		 */
		if (crp->crp_desc && crp->crp_desc->crd_next) {
			crda = crp->crp_desc;
			crde = crda->crd_next;
		} else {
			crde = crp->crp_desc;
			crda = crde->crd_next;
		}
		break;
	case IPPROTO_COMP:
		crdc = crp->crp_desc;
		break;
	case IPPROTO_AH:
		crda = crp->crp_desc;
		break;
	}

	if (crda) {
		/* Authentication descriptor */
		crda->crd_alg = ipsec_ocf_authalg(ipsp->ips_authalg);
		if (!crda->crd_alg) {
			KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
				    "bad auth alg 0x%x\n", ipsp->ips_authalg);
			crypto_freereq(crp);
			return IPSEC_RCV_BADPROTO;
		}

		if (!crde) { /* assuming AH processing */
			/* push the IP header so we can authenticate it */
			skb_push(irs->skb,
				 ((unsigned char *)irs->protostuff.ahstuff.ahp) -
				 ((unsigned char *)irs->iph));
		}

		crda->crd_key          = ipsp->ips_key_a;
		crda->crd_klen         = ipsp->ips_key_bits_a;
		crda->crd_inject       = irs->authenticator - irs->skb->data;

		/* OCF needs cri_mlen initialized in order to properly migrate the
		 * session to another driver */
		crda->crd_mlen = 12;

		/* Copy the authenticator to check against later */
		memcpy(irs->hash, irs->authenticator, 12);

		if (!crde) { /* assume AH processing */
			/* AH processing, save fields we have to zero */
			if (lsw_ip_hdr_version(irs) == 4) {
				irs->ttl                   =
					lsw_ip4_hdr(irs)->ttl;
				irs->check                 =
					lsw_ip4_hdr(irs)->check;
				irs->frag_off              =
					lsw_ip4_hdr(irs)->frag_off;
				irs->tos                   =
					lsw_ip4_hdr(irs)->tos;
				lsw_ip4_hdr(irs)->ttl      = 0;
				lsw_ip4_hdr(irs)->check    = 0;
				lsw_ip4_hdr(irs)->frag_off = 0;
				lsw_ip4_hdr(irs)->tos      = 0;
			}
			crda->crd_len      = irs->skb->len;
			crda->crd_skip     = ((unsigned char *)irs->iph) -
					     irs->skb->data;
			memset(irs->authenticator, 0, 12);
		} else {
			crda->crd_len      = irs->ilen;
			crda->crd_skip     =
				((unsigned char *) irs->protostuff.espstuff.
				 espp) -
				irs->skb->data;
			/*
			 * It would be nice to clear the authenticator here
			 * to be sure we do not see it again later when checking.
			 * We cannot.  Some HW actually expects to check the in-data
			 * hash and flag an error if it is incorrect.
			 *
			 * What we do to allow this is to pass in the current in-data
			 * value.  Your OCF driver must ensure that it fails a request
			 * for hash+decrypt with an invalid hash value, or returns the
			 * computed in-data hash as requested.
			 *
			 * If your driver does not check the in-data hash but just
			 * computes its value, you must ensure that it does not return
			 * the original in-data hash by accident.  It must invalidate the
			 * in-data hash itself to force an auth check error.
			 *
			 * All existing drivers that do not care about the current
			 * in-data hash do this by clearing the in-data hash before
			 * processing, either directly or via their implementation.
			 */
#if 0
			memset(irs->authenticator, 0, 12);
#endif
		}
	}

	if (crde) {
		crde->crd_alg = ipsec_ocf_encalg(ipsp->ips_encalg);
		if (!crde->crd_alg) {
			KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
				    "bad enc alg 0x%x\n", ipsp->ips_encalg);
			crypto_freereq(crp);
			return IPSEC_RCV_BADPROTO;
		}

		irs->esphlen     = ESP_HEADER_LEN + ipsp->ips_iv_size;
		irs->ilen       -= irs->esphlen;
		crde->crd_skip   =
			(skb_transport_header(irs->skb) -
			 irs->skb->data) + irs->esphlen;
		crde->crd_len    = irs->ilen;
		crde->crd_inject = crde->crd_skip - ipsp->ips_iv_size;
		crde->crd_klen   = ipsp->ips_key_bits_e;
		crde->crd_key    = ipsp->ips_key_e;
	}

	if (crdc) {
		struct ipcomphdr *cmph;
		int compalg = ipsp->ips_encalg;
		/* Decompression descriptor */
		crdc->crd_alg = ipsec_ocf_compalg(compalg);
		if (!crdc->crd_alg) {
			KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv: "
				    "bad decomp alg 0x%x\n",
				    ipsp->ips_encalg);
			crypto_freereq(crp);
			return IPSEC_RCV_BADPROTO;
		}
		crdc->crd_flags  = 0;
		/* this is where the current ipcomp header is */
		cmph = (struct ipcomphdr*)((char*)irs->iph + irs->iphlen);
		/* store the nested protocol */
		irs->next_header = cmph->ipcomp_nh;
		/* start decompressing after ip header and the ipcomp header */
		crdc->crd_skip   = ((unsigned char*)irs->iph) + irs->iphlen +
				   sizeof(struct ipcomphdr) - irs->skb->data;
		/* decompress all ip data past the ipcomp header */
		if (lsw_ip_hdr_version(irs) == 6) {
			crdc->crd_len    =
				(ntohs(lsw_ip6_hdr(irs)->payload_len) +
				 sizeof(struct ipv6hdr)) -
				irs->iphlen -
				sizeof(struct ipcomphdr);
		} else {
			crdc->crd_len    = ntohs(lsw_ip4_hdr(irs)->tot_len) -
					   irs->iphlen -
					   sizeof(struct ipcomphdr);
		}
		/* decompress inplace (some hardware can only do inplace) */
		crdc->crd_inject = crdc->crd_skip;
	}

	crp->crp_ilen = irs->skb->len;  /* Total input length */
	crp->crp_olen = irs->skb->len;  /* Total output length */
	crp->crp_flags =
		CRYPTO_F_SKBUF |
		(ipsec_ocf_cbimm ? CRYPTO_F_CBIMM : 0) |
		(ipsec_ocf_batch ? CRYPTO_F_BATCH : 0);
	crp->crp_buf = (caddr_t) irs->skb;
	crp->crp_callback = ipsec_ocf_rcv_cb;
	crp->crp_sid = ipsp->ocf_cryptoid;
	crp->crp_opaque = (caddr_t) irs;
rcv_migrate:
	if ((err = crypto_dispatch(crp))) {
		KLIPS_PRINT(debug_rcv, "crypto_dispatch rcv failure %u\n",
			    err);
		crypto_freereq(crp);
		return IPSEC_RCV_REALLYBAD;
	}
	if (crp->crp_etype == EAGAIN) {
		/* Session has been migrated. Store the new session id and retry */
		ipsp->ocf_cryptoid = crp->crp_sid;
		goto rcv_migrate;
	}

	return IPSEC_RCV_PENDING;
}
Example #11
static int ipsec_ocf_xmit_cb(struct cryptop *crp)
{
	struct ipsec_xmit_state *ixs =
		(struct ipsec_xmit_state *)crp->crp_opaque;
	struct iphdr *newiph;
	struct ipcomphdr *cmph;
	unsigned orig_len, comp_len;
	struct cryptodesc *crdc = NULL;

	KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
		    "klips_debug:ipsec_ocf_xmit_cb\n");

	if (ixs == NULL) {
		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: "
			    "NULL ixs in callback\n");
		return 0;
	}

	/*
	 * we must update the state before returning to the state machine.
	 * if we have an error,  terminate the processing by moving to the DONE
	 * state
	 */

	ixs->state = IPSEC_XSM_DONE; /* assume bad xmit */
	if (crp->crp_etype) {
		ptrdiff_t ptr_delta;

		if (crp->crp_etype == EAGAIN) {
			/* Session has been migrated. Store the new session id and retry */
			KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
				    "klips_debug:ipsec_ocf_xmit_cb: crypto session migrated\n");
			ixs->ipsp->ocf_cryptoid = crp->crp_sid;
			/* resubmit request */
			if (crypto_dispatch(crp) == 0)
				return 0;
			/* resubmit failed */
		}

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: "
			    "error in processing 0x%x\n",
			    crp->crp_etype);

		switch (ixs->ipsp->ips_said.proto) {
		case IPPROTO_COMP:
			/*
			 * It's ok for compression to fail... we made a clone
			 * of the packet, so we just revert it now...
			 */
			if (!ixs->pre_ipcomp_skb) {
				KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
					    "klips_debug:ipsec_ocf_xmit_cb: "
					    "IPcomp on %u bytes failed, "
					    "but we have no clone!\n",
					    (unsigned int)
					    (lsw_ip_hdr_version(ixs) == 6 ?
					     (ntohs(lsw_ip6_hdr(ixs)->
						    payload_len) +
					      sizeof(struct ipv6hdr))
					     :
					     ntohs(lsw_ip4_hdr(
							   ixs)->tot_len)) -
					    ixs->iphlen);
				/* this is a fail. */
				break;
			}

			KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
				    "klips_debug:ipsec_ocf_xmit_cb: "
				    "IPcomp on %u bytes failed, "
				    "using backup clone.\n",
				    (unsigned int)
				    (lsw_ip_hdr_version(ixs) == 6 ?
				     (ntohs(lsw_ip6_hdr(ixs)->payload_len) +
				      sizeof(struct ipv6hdr)) :
				     ntohs(lsw_ip4_hdr(ixs)->tot_len)) -
				    ixs->iphlen);

			ptr_delta = ixs->pre_ipcomp_skb->data - ixs->skb->data;
			ixs->iph = (void*)((char*)ixs->iph + ptr_delta);

			/*
			 * cannot free it here, because we are under
			 * IRQ, potentially, so queue it for later
			 */
			kfree_skb(ixs->skb);
			ixs->skb = ixs->pre_ipcomp_skb;
			ixs->pre_ipcomp_skb = NULL;

			skb_set_network_header(ixs->skb,
					       ipsec_skb_offset(ixs->skb,
								((void *)
								 skb_network_header(
									 ixs->skb))
								+
								ptr_delta));
			skb_set_transport_header(ixs->skb,
						 ipsec_skb_offset(ixs->skb,
								  ((void *)
								   skb_transport_header(
									   ixs->skb))
								  +
								  ptr_delta));
			KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);

			/* this means we don't compress */
			ixs->state = IPSEC_XSM_CONT;
			break;
		}
		goto bail;
	}

	switch (ixs->ipsp->ips_said.proto) {
	case IPPROTO_ESP:
		/* ESP, nothing to do */
		break;

	case IPPROTO_AH:
		/* AH post processing, put back fields we had to zero */
		if (lsw_ip_hdr_version(ixs) == 4) {
			lsw_ip4_hdr(ixs)->ttl      = ixs->ttl;
			lsw_ip4_hdr(ixs)->check    = ixs->check;
			lsw_ip4_hdr(ixs)->frag_off = ixs->frag_off;
			lsw_ip4_hdr(ixs)->tos      = ixs->tos;
		}
		break;

	case IPPROTO_COMP:
		/* IPcomp fill in the header */
		crdc = crp->crp_desc;

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
			    "klips_debug:ipsec_ocf_xmit_cb: "
			    "after <%s%s%s>, SA:%s:\n",
			    IPS_XFORM_NAME(ixs->ipsp),
			    ixs->sa_len ? ixs->sa_txt : " (error)");
		KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);

		orig_len = (lsw_ip_hdr_version(ixs) == 6 ?
			    (ntohs(lsw_ip6_hdr(ixs)->payload_len) +
			     sizeof(struct ipv6hdr)) :
			    ntohs(lsw_ip4_hdr(ixs)->tot_len)) -
			   ixs->iphlen;
		comp_len = crp->crp_olen;

		if (sysctl_ipsec_debug_ipcomp && sysctl_ipsec_debug_verbose) {
			ipsec_dmp_block("compress after",
					((unsigned char*)ixs->iph) + ixs->iphlen,
					comp_len);
		}

		newiph = (struct iphdr *)
			((char*)ixs->iph - sizeof(struct ipcomphdr));
		cmph = (struct ipcomphdr *)((char*)newiph + ixs->iphlen);

		/* move the ip header to make room for the new ipcomp header */
		memmove(((unsigned char *) ixs->skb->data) -
			sizeof(struct ipcomphdr),
			ixs->skb->data,
			(((unsigned char *) ixs->iph) + ixs->iphlen) -
			((unsigned char *) ixs->skb->data));
		/* DAVIDM check for head room */
		skb_push(ixs->skb, sizeof(struct ipcomphdr));

		ixs->iph = newiph;
		skb_set_network_header(ixs->skb,
				       ipsec_skb_offset(ixs->skb, newiph));
		skb_set_transport_header(ixs->skb,
					 ipsec_skb_offset(ixs->skb,
							  newiph) +
					 ixs->iphlen);

		/* now we can fill in the ipcomp header */
		cmph->ipcomp_nh = ixs->next_header;
		cmph->ipcomp_flags = 0;
		cmph->ipcomp_cpi =
			htons((__u16)(ntohl(ixs->ipsp->ips_said.spi) &
				      0x0000ffff));

		/* update the ip header to reflect the compression */
		if (lsw_ip_hdr_version(ixs) == 6) {
			lsw_ip6_hdr(ixs)->nexthdr = IPPROTO_COMP;
			lsw_ip6_hdr(ixs)->payload_len =
				htons(ixs->iphlen + sizeof(struct ipcomphdr) +
				      comp_len - sizeof(struct ipv6hdr));
		} else {
			lsw_ip4_hdr(ixs)->protocol = IPPROTO_COMP;
			lsw_ip4_hdr(ixs)->tot_len =
				htons(ixs->iphlen + sizeof(struct ipcomphdr) +
				      comp_len);
			lsw_ip4_hdr(ixs)->check = 0;
			lsw_ip4_hdr(ixs)->check =
				ip_fast_csum((char *) ixs->iph,
					     lsw_ip4_hdr(ixs)->ihl);
		}

		/* Update skb length/tail by "unputting" the shrinkage */
		safe_skb_put(ixs->skb, comp_len - orig_len);

		ixs->ipsp->ips_comp_adapt_skip = 0;
		ixs->ipsp->ips_comp_adapt_tries = 0;

		/* release the backup copy */
		if (ixs->pre_ipcomp_skb) {
			kfree_skb(ixs->pre_ipcomp_skb);
			ixs->pre_ipcomp_skb = NULL;
		}

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
			    "klips_debug:ipsec_ocf_xmit_cb: "
			    "after <%s%s%s>, SA:%s:\n",
			    IPS_XFORM_NAME(ixs->ipsp),
			    ixs->sa_len ? ixs->sa_txt : " (error)");
		KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);
		break;
	}

	/* all good */
	ixs->state = IPSEC_XSM_CONT;

bail:
	crypto_freereq(crp);
	crp = NULL;
	ipsec_ocf_queue_task(ipsec_xsm, ixs);
	return 0;
}
Example #12
static int ipsec_ocf_rcv_cb(struct cryptop *crp)
{
	struct ipsec_rcv_state *irs =
		(struct ipsec_rcv_state *)crp->crp_opaque;
	struct iphdr *newiph;
	unsigned orig_len, decomp_len;
	struct cryptodesc *crdc = NULL;

	KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb\n");

	if (irs == NULL) {
		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: "
			    "NULL irs in callback\n");
		return 0;
	}

	/*
	 * we must update the state before returning to the state machine.
	 * if we have an error,  terminate the processing by moving to the DONE
	 * state
	 */

	irs->state = IPSEC_RSM_DONE; /* assume it went badly */

	if (crp->crp_etype) {
		ptrdiff_t ptr_delta;

		if (crp->crp_etype == EAGAIN) {
			/* Session has been migrated. Store the new session id and retry */
			KLIPS_PRINT(debug_rcv,
				    "klips_debug:ipsec_ocf_rcv_cb: crypto session migrated\n");
			irs->ipsp->ocf_cryptoid = crp->crp_sid;
			/* resubmit request */
			if (crypto_dispatch(crp) == 0)
				return 0;
			/* resubmit failed */
		}

		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: "
			    "error in processing 0x%x\n", crp->crp_etype);

		switch (irs->ipsp->ips_said.proto) {
		case IPPROTO_COMP:
			/*
			 * we restore the previous skb on error and pretend nothing
			 * happened, just no compression
			 */
			ptr_delta = irs->pre_ipcomp_skb->data - irs->skb->data;
			irs->authenticator =
				(void*)((char*)irs->authenticator + ptr_delta);
			irs->iph           =
				(void*)((char*)irs->iph           + ptr_delta);

			kfree_skb(irs->skb);
			irs->skb = irs->pre_ipcomp_skb;
			irs->pre_ipcomp_skb = NULL;
			break;
		}

		goto bail;
	}

	switch (irs->ipsp->ips_said.proto) {
	case IPPROTO_ESP:
		/* ESP, process it */
		if (ipsec_rcv_esp_post_decrypt(irs) == IPSEC_RCV_OK) {
			/* this one came up good, set next state */
			irs->state = IPSEC_RSM_DECAP_CONT;
		}
		break;

	case IPPROTO_AH:
		/* AH post processing, put back fields we had to zero */
		if (lsw_ip_hdr_version(irs) == 4) {
			lsw_ip4_hdr(irs)->ttl      = irs->ttl;
			lsw_ip4_hdr(irs)->check    = irs->check;
			lsw_ip4_hdr(irs)->frag_off = irs->frag_off;
			lsw_ip4_hdr(irs)->tos      = irs->tos;
		}
		irs->state         = IPSEC_RSM_AUTH_CHK;

		/* pull up the IP header again after processing */
		skb_pull(irs->skb,
			 ((unsigned char *)irs->protostuff.ahstuff.ahp) -
			 ((unsigned char *)irs->iph));

		break;

	case IPPROTO_COMP:
		crdc = crp->crp_desc;

		KLIPS_PRINT(debug_rcv, "comp before adjustments:\n");
		KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph);

		orig_len = irs->skb->len - sizeof(struct ipcomphdr);
		decomp_len = crp->crp_olen;

		newiph = (struct iphdr*)
			((char*)irs->iph + sizeof(struct ipcomphdr));

		KLIPS_PRINT(debug_rcv,
			    "comp results: olen: %u, inject: %u (len=%d) iph->totlen=%u\n",
			    crp->crp_olen, crdc->crd_inject, decomp_len,
			    ntohs(newiph->tot_len));

		/*
		 * move the ip header to consume room previously taken by
		 * the ipcomp header
		 */
		skb_pull(irs->skb, sizeof(struct ipcomphdr));
		memmove(newiph, irs->iph, irs->iphlen);

		/* adjust the iph pointer to point to the header we decoded */
		irs->iph = newiph;

		skb_set_network_header(irs->skb,
				       ipsec_skb_offset(irs->skb,
							((unsigned char *)
							skb_network_header(irs->skb))
								  +
								  sizeof(struct
									 ipcomphdr)));
		skb_set_transport_header(irs->skb,
					 ipsec_skb_offset(irs->skb,
							  ((unsigned char *)
							  skb_transport_header(irs->skb))
								    +
								    sizeof(struct
									    ipcomphdr)));

		if (lsw_ip_hdr_version(irs) == 6) {
			lsw_ip6_hdr(irs)->nexthdr  = irs->next_header;
		} else {
			lsw_ip4_hdr(irs)->protocol = irs->next_header;
			lsw_ip4_hdr(irs)->tot_len = htons(
				irs->iphlen + decomp_len);
			lsw_ip4_hdr(irs)->check = 0;
			lsw_ip4_hdr(irs)->check = ip_fast_csum(irs->iph, lsw_ip4_hdr(
								       irs)->ihl);
		}

		KLIPS_PRINT(debug_rcv, "comp after len adjustments:\n");
		KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph);

		/* Update skb length/tail by "putting" the growth */
		safe_skb_put(irs->skb, decomp_len - crp->crp_olen);

		/* set the new header in the skb */
		skb_set_network_header(irs->skb,
				       ipsec_skb_offset(irs->skb, irs->iph));
		KLIPS_IP_PRINT(debug_rcv & DB_RX_PKTRX, ip_hdr(irs->skb));

		/* release the backup copy */
		if (irs->pre_ipcomp_skb) {
			kfree_skb(irs->pre_ipcomp_skb);
			irs->pre_ipcomp_skb = NULL;
		}

		/* IPcomp finished, continue processing */
		irs->state = IPSEC_RSM_DECAP_CONT;
		break;
	}

bail:
	crypto_freereq(crp);
	crp = NULL;
	ipsec_ocf_queue_task(ipsec_rsm, irs);
	return 0;
}
Example #13
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
			    bool *csum_err)
{
	const struct gre_base_hdr *greh;
	__be32 *options;
	int hdr_len;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
	hdr_len = ip_gre_calc_hlen(tpi->flags);

	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	tpi->proto = greh->protocol;

	options = (__be32 *)(greh + 1);
	if (greh->flags & GRE_CSUM) {
		if (skb_checksum_simple_validate(skb)) {
			*csum_err = true;
			return -EINVAL;
		}

		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
					 null_compute_pseudo);
		options++;
	}

	if (greh->flags & GRE_KEY) {
		tpi->key = *options;
		options++;
	} else {
		tpi->key = 0;
	}
	if (unlikely(greh->flags & GRE_SEQ)) {
		tpi->seq = *options;
		options++;
	} else {
		tpi->seq = 0;
	}
	/* WCCP version 1 and 2 protocol decoding.
	 * - Change protocol to IP
	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
	 */
	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
		tpi->proto = htons(ETH_P_IP);
		if ((*(u8 *)options & 0xF0) != 0x40) {
			hdr_len += 4;
			if (!pskb_may_pull(skb, hdr_len))
				return -EINVAL;
		}
	}
	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
}
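The hdr_len that the second pskb_may_pull() validates comes from the flag bits: a 4-byte base header plus one 4-byte word for each of the optional checksum, key and sequence fields. A hedged restatement of that computation; gre_hlen() below is a local illustration, not the kernel's ip_gre_calc_hlen():

#include <stdio.h>
#include <stdint.h>

#define MY_GRE_CSUM 0x8000u	/* C, K and S bit positions in the first 16-bit word */
#define MY_GRE_KEY  0x2000u
#define MY_GRE_SEQ  0x1000u

static int gre_hlen(uint16_t flags)
{
	int len = 4;			/* struct gre_base_hdr: flags + protocol */

	if (flags & MY_GRE_CSUM)
		len += 4;		/* checksum + reserved */
	if (flags & MY_GRE_KEY)
		len += 4;
	if (flags & MY_GRE_SEQ)
		len += 4;
	return len;
}

int main(void)
{
	printf("plain GRE: %d bytes\n", gre_hlen(0));				/* 4 */
	printf("key+seq:   %d bytes\n", gre_hlen(MY_GRE_KEY | MY_GRE_SEQ));	/* 12 */
	return 0;
}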
Example #14
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int extlen;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	u8 nexthdr;
	char tmp_base[8];
	struct {
#ifdef CONFIG_IPV6_MIP6
		struct in6_addr saddr;
#endif
		struct in6_addr daddr;
		char hdrs[0];
	} *tmp_ext;

	top_iph = (struct ipv6hdr *)skb->data;
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_network_header(skb);
	*skb_network_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(tmp_base, top_iph, sizeof(tmp_base));

	tmp_ext = NULL;
	extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr);
	if (extlen) {
		extlen += sizeof(*tmp_ext);
		tmp_ext = kmalloc(extlen, GFP_ATOMIC);
		if (!tmp_ext) {
			err = -ENOMEM;
			goto error;
		}
#ifdef CONFIG_IPV6_MIP6
		memcpy(tmp_ext, &top_iph->saddr, extlen);
#else
		memcpy(tmp_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*tmp_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto error_free_iph;
	}

	ah = (struct ip_auth_hdr *)skb_transport_header(skb);
	ah->nexthdr = nexthdr;

	top_iph->priority    = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit   = 0;

	ahp = x->data;
	ah->hdrlen  = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
				   ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(++x->replay.oseq);
	xfrm_aevent_doreplay(x);
	err = ah_mac_digest(ahp, skb, ah->auth_data);
	if (err)
		goto error_free_iph;
	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);

	err = 0;

	memcpy(top_iph, tmp_base, sizeof(tmp_base));
	if (tmp_ext) {
#ifdef CONFIG_IPV6_MIP6
		memcpy(&top_iph->saddr, tmp_ext, extlen);
#else
		memcpy(&top_iph->daddr, tmp_ext, extlen);
#endif
error_free_iph:
		kfree(tmp_ext);
	}

error:
	return err;
}
Example #15
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo, gso_partial;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}
Example #16
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
			   void *from, size_t length,
			   struct rtable **rtp,
			   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;
	struct rtable *rt = *rtp;
	int hlen, tlen;

	if (length > rt->dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	hlen = LL_RESERVED_SPACE(rt->dst.dev);
	tlen = rt->dst.dev->needed_tailroom;
	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hlen);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*rtp = NULL;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = -EFAULT;
	if (memcpy_fromiovecend((void *)iph, from, 0, length))
		goto error_free;

	iphlen = iph->ihl * 4;

	err = -EINVAL;
	if (iphlen > length)
		goto error_free;

	if (iphlen >= sizeof(*iph)) {
		if (!iph->saddr)
			iph->saddr = fl4->saddr;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(iph, &rt->dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_free:
	kfree_skb(skb);
error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !inet->recverr)
		err = 0;
	return err;
}
Example #17
/*
 *	Send a Neighbour Discovery message
 */
static void __ndisc_send(struct net_device *dev,
			 struct neighbour *neigh,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr,
			 struct icmp6hdr *icmp6h, const struct in6_addr *target,
			 int llinfo)
{
	struct flowi fl;
	struct dst_entry *dst;
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.ndisc_sk;
	struct sk_buff *skb;
	struct icmp6hdr *hdr;
	struct inet6_dev *idev;
	int len;
	int err;
	u8 *opt, type;

	type = icmp6h->icmp6_type;

	icmpv6_flow_init(sk, &fl, type, saddr, daddr, dev->ifindex);

	dst = icmp6_dst_alloc(dev, neigh, daddr);
	if (!dst)
		return;

	err = xfrm_lookup(&dst, &fl, NULL, 0);
	if (err < 0)
		return;

	if (!dev->addr_len)
		llinfo = 0;

	len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
	if (llinfo)
		len += ndisc_opt_addr_space(dev);

	skb = sock_alloc_send_skb(sk,
				  (MAX_HEADER + sizeof(struct ipv6hdr) +
				   len + LL_ALLOCATED_SPACE(dev)),
				  1, &err);
	if (!skb) {
		ND_PRINTK0(KERN_ERR
			   "ICMPv6 ND: %s() failed to allocate an skb.\n",
			   __func__);
		dst_release(dst);
		return;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);

	skb->transport_header = skb->tail;
	skb_put(skb, len);

	hdr = (struct icmp6hdr *)skb_transport_header(skb);
	memcpy(hdr, icmp6h, sizeof(*hdr));

	opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
	if (target) {
		ipv6_addr_copy((struct in6_addr *)opt, target);
		opt += sizeof(*target);
	}

	if (llinfo)
		ndisc_fill_addr_option(opt, llinfo, dev->dev_addr,
				       dev->addr_len, dev->type);

	hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
					   IPPROTO_ICMPV6,
					   csum_partial((__u8 *) hdr,
							len, 0));

	skb->dst = dst;

	idev = in6_dev_get(dst->dev);
	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);

	err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
		      dst_output);
	if (!err) {
		ICMP6MSGOUT_INC_STATS(idev, type);
		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
	}

	if (likely(idev != NULL))
		in6_dev_put(idev);
}
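
The csum_ipv6_magic()/csum_partial() pair above folds the IPv6 pseudo-header (source and destination address, upper-layer length, next header) into the ICMPv6 checksum. A self-contained userspace sketch of the same computation, with hypothetical helper names (the checksum field of the message must be zero while summing):

#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

/* Ones'-complement sum of a buffer, taken as big-endian 16-bit words. */
static uint32_t csum_add_buf(uint32_t sum, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;	/* pad the odd trailing byte with zero */
	return sum;
}

/* RFC 4443 §2.3: sum the IPv6 pseudo-header (saddr, daddr, upper-layer
 * length, next header) and the ICMPv6 message, then fold and complement. */
static uint16_t icmp6_checksum(const struct in6_addr *saddr,
			       const struct in6_addr *daddr,
			       const void *msg, uint16_t len)
{
	uint32_t sum = 0;

	sum = csum_add_buf(sum, saddr, sizeof(*saddr));
	sum = csum_add_buf(sum, daddr, sizeof(*daddr));
	sum += len;		/* 32-bit upper-layer length, high word zero */
	sum += IPPROTO_ICMPV6;	/* three zero bytes plus the next-header byte */
	sum = csum_add_buf(sum, msg, len);

	while (sum >> 16)	/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;	/* store big-endian into the packet */
}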
Exemplo n.º 18
0
Arquivo: flow.c Projeto: Milstein/ovs
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;
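			/* nd_opt_len counts 8-octet units (RFC 4861), so zero
			 * is malformed and must terminate the walk. */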

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
				    &nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
				    &nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
Exemplo n.º 19
0
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl;
	struct gre_base_hdr *greh;
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_IPIP)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
	if (unlikely(ghl < sizeof(*greh)))
		goto out;

	csum = !!(greh->flags & GRE_CSUM);

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs)) {
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
Exemplo n.º 20
0
struct sk_buff *ndisc_build_skb(struct net_device *dev,
				const struct in6_addr *daddr,
				const struct in6_addr *saddr,
				struct icmp6hdr *icmp6h,
				const struct in6_addr *target,
				int llinfo)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.ndisc_sk;
	struct sk_buff *skb;
	struct icmp6hdr *hdr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int len;
	u8 *opt;

	if (!dev->addr_len)
		llinfo = 0;

	len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
	if (llinfo)
		len += ndisc_opt_addr_space(dev);

	skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) +
			 len + hlen + tlen), GFP_ATOMIC);
	if (!skb) {
		ND_PRINTK0(KERN_ERR
			   "ICMPv6 ND: %s() failed to allocate an skb.\n",
			   __func__);
		return NULL;
	}

	skb_reserve(skb, hlen);
	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);

	skb->transport_header = skb->tail;
	skb_put(skb, len);

	hdr = (struct icmp6hdr *)skb_transport_header(skb);
	memcpy(hdr, icmp6h, sizeof(*hdr));

	opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
	if (target) {
		*(struct in6_addr *)opt = *target;
		opt += sizeof(*target);
	}

	if (llinfo)
		ndisc_fill_addr_option(opt, llinfo, dev->dev_addr,
				       dev->addr_len, dev->type);

	hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
					   IPPROTO_ICMPV6,
					   csum_partial(hdr,
							len, 0));

	/* Manually assign socket ownership as we avoid calling
	 * sock_alloc_send_pskb() to bypass wmem buffer limits
	 */
	skb_set_owner_w(skb, sk);

	return skb;
}
Exemplo n.º 21
0
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;

	/* descs[0] was mapped with dma_map_single(), so unmap it the same
	 * way; only the fragment descriptors were mapped as pages. */
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
Exemplo n.º 22
0
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int num, err;
	struct scatterlist sg[1+MAX_SKB_FRAGS];
	struct virtio_net_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 1+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
		 dest[0], dest[1], dest[2],
		 dest[3], dest[4], dest[5]);

	/* Encode metadata header at front. */
	hdr = skb_vnet_hdr(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	vnet_hdr_to_sg(sg, skb);
	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	__skb_queue_head(&vi->send, skb);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);
	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err) {
		pr_debug("%s: virtio not prepared to send\n", dev->name);
		netif_stop_queue(dev);

		/* Activate callback for using skbs: if this returns false it
		 * means some were used in the meantime. */
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		__skb_unlink(skb, &vi->send);

		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	return 0;
}
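
For reference, the metadata header being filled in above is the legacy struct virtio_net_hdr. A sketch of its layout and flag values as they appeared in early kernels (the authoritative definition lives in include/linux/virtio_net.h):

struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* csum_start/csum_offset are valid */
	__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE		0
#define VIRTIO_NET_HDR_GSO_TCPV4	1
#define VIRTIO_NET_HDR_GSO_UDP		3
#define VIRTIO_NET_HDR_GSO_TCPV6	4
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* OR'ed into gso_type */
	__u8 gso_type;
	__u16 hdr_len;		/* Ethernet + IP + transport header length */
	__u16 gso_size;		/* payload bytes per segment */
	__u16 csum_start;	/* where the device starts checksumming */
	__u16 csum_offset;	/* offset from csum_start to store the sum */
};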
Exemplo n.º 23
0
unsigned int hook_setpriority(unsigned int hooknum, struct sk_buff **skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
{
	struct sk_buff *sock_buff = *skb;
	struct iphdr *ip_header;
	struct udphdr *udp_header;
	struct tcphdr *tcp_header;
	unsigned short ip_len;
	unsigned int sip, dip;
	unsigned int sport = 0;
	unsigned int dport = 0;
	char incoming_addr[16];
	char server_addr[16] = "172.16.0.5";

	if (!sock_buff)
		return NF_ACCEPT;

	//extract header info from the outgoing packet

	ip_header = (struct iphdr *)skb_network_header(sock_buff);	//extract ip_header
	ip_len = ip_header->ihl * 4;					//IP header length in bytes

	sip = (unsigned int)ip_header->saddr;
	dip = (unsigned int)ip_header->daddr;

	if (ip_header->protocol == IPPROTO_UDP) {
		udp_header = (struct udphdr *)skb_transport_header(sock_buff);
		sport = (unsigned int)ntohs(udp_header->source);
		dport = (unsigned int)ntohs(udp_header->dest);
	} else if (ip_header->protocol == IPPROTO_TCP) {
		tcp_header = (struct tcphdr *)skb_transport_header(sock_buff);
		sport = (unsigned int)ntohs(tcp_header->source);
		dport = (unsigned int)ntohs(tcp_header->dest);
	}

	printk(KERN_INFO "OUT packet info: src ip: %u, src port: %u; dest ip: %u, dest port: %u; proto: %u\n", sip, sport, dip, dport, ip_header->protocol);

	//determine the class of the packet from its source address
	snprintf(incoming_addr, sizeof(incoming_addr), "%u.%u.%u.%u",
		 NIPQUAD(ip_header->saddr));

	if (isInRange("172.16.0.0", "172.16.0.10", incoming_addr) == 1) {	//Class A  Bit format : 000 001 XX
		return NF_ACCEPT;
	} else if (strcmp(incoming_addr, server_addr) == 0) {			//Class C  Bit format : 000 111 XX
		return NF_ACCEPT;
	}

	return NF_ACCEPT;
}
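
A hook like the one above still has to be attached to the stack. A minimal registration sketch, assuming the same pre-2.6.24 era as the snippet (hooks receive struct sk_buff ** and the IPv4 output hook point is named NF_IP_LOCAL_OUT):

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static struct nf_hook_ops setpriority_ops = {
	.hook     = hook_setpriority,
	.pf       = PF_INET,
	.hooknum  = NF_IP_LOCAL_OUT,	/* locally generated, outgoing packets */
	.priority = NF_IP_PRI_FIRST,	/* run before the other output hooks */
};

static int __init setpriority_init(void)
{
	return nf_register_hook(&setpriority_ops);
}

static void __exit setpriority_exit(void)
{
	nf_unregister_hook(&setpriority_ops);
}

module_init(setpriority_init);
module_exit(setpriority_exit);
MODULE_LICENSE("GPL");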
Exemplo n.º 24
0
static unsigned int switch_hook_forward(
	unsigned int hook,
	struct sk_buff *skb,
	const struct net_device *dev_in,
	const struct net_device *dev_out,
	int (*okfn)(struct sk_buff *)
) {
	//   layer 2   //
	//-------------// skb->data
	//   layer 3   //
	unsigned int result = NF_ACCEPT;
	struct iphdr *ip_header;
	ip_header = ip_hdr(skb);
	if (ip_header->protocol == IPPROTO_TCP) {
		unsigned int ip_header_length;
		unsigned int ip_packet_length;
		ip_header_length = ip_hdrlen(skb);
		ip_packet_length = ntohs(ip_header->tot_len);
		if (ip_packet_length > MTU) {
			int unused;
			int remain;
			int length;
			int offset;
			int pstart;
			__be16 morefrag;
			struct sk_buff *skb_frag;
			struct iphdr *ip_header_frag;
			skb_push(skb, ETH_HLEN);
			//   layer 1   //
			//-------------// skb->data
			//   layer 2   //
			unused = LL_RESERVED_SPACE(skb->dev);
			remain = skb->len - ETH_HLEN - ip_header_length;
			offset = (ntohs(ip_header->frag_off) & IP_OFFSET) << 3;
			pstart = ETH_HLEN + ip_header_length;
			morefrag = ip_header->frag_off & htons(IP_MF);
			while (remain > 0) {
				length = remain > MTU ? MTU : remain;
				if ((skb_frag = alloc_skb(unused + ETH_HLEN + ip_header_length + length, GFP_ATOMIC)) == NULL) {
					break;
				}
				skb_frag->dev = skb->dev;
				skb_reserve(skb_frag, unused);
				skb_put(skb_frag, ETH_HLEN + ip_header_length + length);
				skb_frag->mac_header = skb_frag->data;
				skb_frag->network_header = skb_frag->data + ETH_HLEN;
				skb_frag->transport_header = skb_frag->data + ETH_HLEN + ip_header_length;
				skb_copy_from_linear_data(skb, skb_mac_header(skb_frag), ETH_HLEN + ip_header_length);
				skb_copy_bits(skb, pstart, skb_transport_header(skb_frag), length);
				remain = remain - length;
				//   layer 1   //
				//-------------// skb_frag->data
				//   layer 2   //
				skb_pull(skb_frag, ETH_HLEN);
				//   layer 2   //
				//-------------// skb_frag->data
				//   layer 3   //
				skb_reset_network_header(skb_frag);
				skb_pull(skb_frag, ip_header_length);
				//   layer 3   //
				//-------------// skb_frag->data
				//   layer 4   //
				skb_reset_transport_header(skb_frag);
				skb_push(skb_frag, ip_header_length);
				//   layer 2   //
				//-------------// skb_frag->data
				//   layer 3   //
				ip_header_frag = ip_hdr(skb_frag);
				ip_header_frag->frag_off = htons(offset >> 3);
				if (remain > 0 || morefrag) {
					ip_header_frag->frag_off = ip_header_frag->frag_off | htons(IP_MF);
				}
				ip_header_frag->frag_off = ip_header_frag->frag_off | htons(IP_DF);
				ip_header_frag->tot_len = htons(ip_header_length + length);
				ip_header_frag->protocol = IPPROTO_TCP;
				ip_send_check(ip_header_frag);
				skb_push(skb_frag, ETH_HLEN);
				//   layer 1   //
				//-------------// skb_frag->data
				//   layer 2   //
				dev_queue_xmit(skb_frag);
				pstart = pstart + length;
				offset = offset + length;
			}
			skb_pull(skb, ETH_HLEN);
			//   layer 2   //
			//-------------// skb->data
			//   layer 3   //
			result = NF_DROP;
		}
	}
	return result;
}
Exemplo n.º 25
0
#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
Exemplo n.º 26
0
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	
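	/* Make the one we just received the head. */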
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	
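	/* Unfragmented part is taken from the first segment. */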
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	
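	/* Head of list must not be cloned. */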
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

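	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */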
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

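	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */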
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	
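	/* Yes, and fold redundant checksum back. 8) */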
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
Exemplo n.º 27
0
static void CVE_2015_2922_linux3_2_25_ndisc_router_discovery(struct sk_buff *skb)
{
	struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
	struct neighbour *neigh = NULL;
	struct inet6_dev *in6_dev;
	struct rt6_info *rt = NULL;
	int lifetime;
	struct ndisc_options ndopts;
	int optlen;
	unsigned int pref = 0;

	__u8 * opt = (__u8 *)(ra_msg + 1);

	optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);

	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
		ND_PRINTK2(KERN_WARNING
			   "ICMPv6 RA: source address is not link-local.\n");
		return;
	}
	if (optlen < 0) {
		ND_PRINTK2(KERN_WARNING
			   "ICMPv6 RA: packet too short\n");
		return;
	}

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
		ND_PRINTK2(KERN_WARNING
			   "ICMPv6 RA: from host or unauthorized router\n");
		return;
	}
#endif

	/*
	 *	set the RA_RECV flag in the interface
	 */

	in6_dev = __in6_dev_get(skb->dev);
	if (in6_dev == NULL) {
		ND_PRINTK0(KERN_ERR
			   "ICMPv6 RA: can't find inet6 device for %s.\n",
			   skb->dev->name);
		return;
	}

	if (!ndisc_parse_options(opt, optlen, &ndopts)) {
		ND_PRINTK2(KERN_WARNING
			   "ICMP6 RA: invalid ND options\n");
		return;
	}

	if (!accept_ra(in6_dev))
		goto skip_linkparms;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	/* skip link-specific parameters from interior routers */
	if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
		goto skip_linkparms;
#endif

	if (in6_dev->if_flags & IF_RS_SENT) {
		/*
		 *	flag that an RA was received after an RS was sent
		 *	out on this interface.
		 */
		in6_dev->if_flags |= IF_RA_RCVD;
	}

	/*
	 * Remember the managed/otherconf flags from most recently
	 * received RA message (RFC 2462) -- yoshfuji
	 */
	in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED |
				IF_RA_OTHERCONF)) |
				(ra_msg->icmph.icmp6_addrconf_managed ?
					IF_RA_MANAGED : 0) |
				(ra_msg->icmph.icmp6_addrconf_other ?
					IF_RA_OTHERCONF : 0);

	if (!in6_dev->cnf.accept_ra_defrtr)
		goto skip_defrtr;

	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
		goto skip_defrtr;

	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);

#ifdef CONFIG_IPV6_ROUTER_PREF
	pref = ra_msg->icmph.icmp6_router_pref;
	/* 10b is handled as if it were 00b (medium) */
	if (pref == ICMPV6_ROUTER_PREF_INVALID ||
	    !in6_dev->cnf.accept_ra_rtr_pref)
		pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif

	rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);

	if (rt)
		neigh = dst_get_neighbour(&rt->dst);

	if (rt && lifetime == 0) {
		neigh_clone(neigh);
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (rt == NULL && lifetime) {
		ND_PRINTK3(KERN_DEBUG
			   "ICMPv6 RA: adding default router.\n");

		rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
		if (rt == NULL) {
			ND_PRINTK0(KERN_ERR
				   "ICMPv6 RA: %s() failed to add default route.\n",
				   __func__);
			return;
		}

		neigh = dst_get_neighbour(&rt->dst);
		if (neigh == NULL) {
			ND_PRINTK0(KERN_ERR
				   "ICMPv6 RA: %s() got default router without neighbour.\n",
				   __func__);
			dst_release(&rt->dst);
			return;
		}
		neigh->flags |= NTF_ROUTER;
	} else if (rt) {
		rt->rt6i_flags = (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
	}

	if (rt)
		rt->rt6i_expires = jiffies + (HZ * lifetime);

	if (ra_msg->icmph.icmp6_hop_limit) {
		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
		if (rt)
			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
				       ra_msg->icmph.icmp6_hop_limit);
	}

skip_defrtr:

	/*
	 *	Update Reachable Time and Retrans Timer
	 */

	if (in6_dev->nd_parms) {
		unsigned long rtime = ntohl(ra_msg->retrans_timer);

		if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) {
			rtime = (rtime*HZ)/1000;
			if (rtime < HZ/10)
				rtime = HZ/10;
			in6_dev->nd_parms->retrans_time = rtime;
			in6_dev->tstamp = jiffies;
			inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
		}

		rtime = ntohl(ra_msg->reachable_time);
		if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/(3*HZ)) {
			rtime = (rtime*HZ)/1000;

			if (rtime < HZ/10)
				rtime = HZ/10;

			if (rtime != in6_dev->nd_parms->base_reachable_time) {
				in6_dev->nd_parms->base_reachable_time = rtime;
				in6_dev->nd_parms->gc_staletime = 3 * rtime;
				in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
				in6_dev->tstamp = jiffies;
				inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
			}
		}
	}

skip_linkparms:

	/*
	 *	Process options.
	 */

	if (!neigh)
		neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr,
				       skb->dev, 1);
	if (neigh) {
		u8 *lladdr = NULL;
		if (ndopts.nd_opts_src_lladdr) {
			lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
						     skb->dev);
			if (!lladdr) {
				ND_PRINTK2(KERN_WARNING
					   "ICMPv6 RA: invalid link-layer address length\n");
				goto out;
			}
		}
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
			     NEIGH_UPDATE_F_OVERRIDE|
			     NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
			     NEIGH_UPDATE_F_ISROUTER);
	}

	if (!accept_ra(in6_dev))
		goto out;

#ifdef CONFIG_IPV6_ROUTE_INFO
	if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
		goto skip_routeinfo;

	if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
		struct nd_opt_hdr *p;
		for (p = ndopts.nd_opts_ri;
		     p;
		     p = ndisc_next_option(p, ndopts.nd_opts_ri_end)) {
			struct route_info *ri = (struct route_info *)p;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
			if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT &&
			    ri->prefix_len == 0)
				continue;
#endif
			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
				continue;
			rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
				      &ipv6_hdr(skb)->saddr);
		}
	}

skip_routeinfo:
#endif

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	/* skip link-specific ndopts from interior routers */
	if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
		goto out;
#endif

	if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
		struct nd_opt_hdr *p;
		for (p = ndopts.nd_opts_pi;
		     p;
		     p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
			addrconf_prefix_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3);
		}
	}

	if (ndopts.nd_opts_mtu) {
		__be32 n;
		u32 mtu;

		memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
		mtu = ntohl(n);

		if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
			ND_PRINTK2(KERN_WARNING
				   "ICMPv6 RA: invalid mtu: %d\n",
				   mtu);
		} else if (in6_dev->cnf.mtu6 != mtu) {
			in6_dev->cnf.mtu6 = mtu;

			if (rt)
				dst_metric_set(&rt->dst, RTAX_MTU, mtu);

			rt6_mtu_change(skb->dev, mtu);
		}
	}

	if (ndopts.nd_useropts) {
		struct nd_opt_hdr *p;
		for (p = ndopts.nd_useropts;
		     p;
		     p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) {
			ndisc_ra_useropt(skb, p);
		}
	}

	if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
		ND_PRINTK2(KERN_WARNING
			   "ICMPv6 RA: invalid RA options");
	}
out:
	if (rt)
		dst_release(&rt->dst);
	else if (neigh)
		neigh_release(neigh);
}
Exemplo n.º 28
0
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_GRE_CSUM |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_UDP_TUNNEL |
			       SKB_GSO_UDP_TUNNEL_CSUM |
			       SKB_GSO_TUNNEL_REMCSUM |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
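
A note on the oldlen/delta arithmetic above: the TCP pseudo-header checksum covers the segment length, so cutting one large skb into mss-sized segments only changes a 16-bit quantity, and th->check can be patched incrementally instead of being recomputed over the payload. A standalone sketch of the same RFC 1624-style update, with hypothetical helper names:

#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m'), i.e. patch a checksum when one
 * 16-bit field of the covered data changes from old_val to new_val. */
static uint16_t csum_update16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~csum_fold32(sum);
}

In tcp_gso_segment() the patched field is the pseudo-header length: oldlen holds the complement of the old total length and thlen + mss is the new per-segment length, so delta plays the role of the ~m + m' terms above.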
Exemplo n.º 29
0
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	unsigned int len;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;
	//the outer (delivery) IP header: its addresses are the tunnel endpoints
	iph = ip_hdr(skb);
	//skb->data points at the transport payload here, i.e. the GRE header
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
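			/* If the first payload byte does not carry an IPv4
			 * version nibble (0x4?), a WCCPv2 redirect header is
			 * present, so skip those extra 4 bytes as well. */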
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb->rtable->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);
		//the key step: after GRE decapsulation the packet is fed back into the stack for a second pass
		//skb->protocol and the other fields are already set up (effectively a freshly received packet)
		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return(0);
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return(0);
}
Exemplo n.º 30
0
static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

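		/* The build stage stashed the pending ring index in the
		 * first two bytes of skb->data; recover it here. */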
		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}