Example #1
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
	struct hci_dev *hdev;
	struct sk_buff *skb;
	__u8 dev_type;

	if (data->hdev)
		return -EBADFD;

	/* bits 0-1 are dev_type (Primary or AMP) */
	dev_type = opcode & 0x03;

	if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
		return -EINVAL;

	/* bits 2-5 are reserved (must be zero) */
	if (opcode & 0x3c)
		return -EINVAL;

	skb = bt_skb_alloc(4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdev = hci_alloc_dev();
	if (!hdev) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	data->hdev = hdev;

	hdev->bus = HCI_VIRTUAL;
	hdev->dev_type = dev_type;
	hci_set_drvdata(hdev, data);

	hdev->open  = vhci_open_dev;
	hdev->close = vhci_close_dev;
	hdev->flush = vhci_flush;
	hdev->send  = vhci_send_frame;

	/* bit 6 is for external configuration */
	if (opcode & 0x40)
		set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);

	/* bit 7 is for raw device */
	if (opcode & 0x80)
		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		hci_free_dev(hdev);
		data->hdev = NULL;
		kfree_skb(skb);
		return -EBUSY;
	}

	hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;

	skb_put_u8(skb, 0xff);
	skb_put_u8(skb, opcode);
	put_unaligned_le16(hdev->id, skb_put(skb, 2));
	skb_queue_tail(&data->readq, skb);

	wake_up_interruptible(&data->read_wait);
	return 0;
}
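The opcode byte passed to __vhci_create_device packs several fields, as the comments above describe: bits 0-1 select the device type, bits 2-5 are reserved and must be zero, bit 6 requests external configuration and bit 7 a raw device. A minimal userspace sketch of the same decoding; the DEV_TYPE_* constants are hypothetical stand-ins for the kernel's HCI_PRIMARY/HCI_AMP values:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's HCI_PRIMARY / HCI_AMP values. */
#define DEV_TYPE_PRIMARY	0x00
#define DEV_TYPE_AMP		0x01

static int decode_opcode(unsigned char opcode)
{
	unsigned char dev_type = opcode & 0x03;		/* bits 0-1 */

	if (dev_type != DEV_TYPE_PRIMARY && dev_type != DEV_TYPE_AMP)
		return -1;
	if (opcode & 0x3c)				/* bits 2-5 are reserved */
		return -1;

	printf("dev_type=%u external_config=%d raw_device=%d\n",
	       dev_type, !!(opcode & 0x40), !!(opcode & 0x80));
	return 0;
}

int main(void)
{
	decode_opcode(0xc1);	/* AMP device, external config, raw */
	decode_opcode(0x0c);	/* rejected: reserved bits set */
	return 0;
}

Example #2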
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.flags |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}
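The prev/next scan above keeps the fragment chain ordered by offset before splicing the new buffer in (with a fast path when the new fragment belongs at the tail). A standalone sketch of the same ordered insertion over a plain singly linked list, using a made-up node type instead of sk_buffs:

#include <stdio.h>

struct frag {
	int offset;
	struct frag *next;
};

/* Splice 'n' into the list headed by *head, keeping it sorted by offset,
 * mirroring the prev/next walk in lowpan_frag_queue above.
 */
static void frag_insert(struct frag **head, struct frag *n)
{
	struct frag *prev = NULL, *next;

	for (next = *head; next != NULL; next = next->next) {
		if (next->offset >= n->offset)
			break;
		prev = next;
	}
	n->next = next;
	if (prev)
		prev->next = n;
	else
		*head = n;
}

int main(void)
{
	struct frag a = { 16 }, b = { 0 }, c = { 8 };
	struct frag *head = NULL;

	frag_insert(&head, &a);
	frag_insert(&head, &b);
	frag_insert(&head, &c);
	for (struct frag *f = head; f; f = f->next)
		printf("offset %d\n", f->offset);	/* prints 0, 8, 16 */
	return 0;
}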
Example #3
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL;
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {	
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
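The line *(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; builds the first 32-bit word of the IPv6 header: 4 bits of version, 8 bits of traffic class, 20 bits of flow label. The flow label is OR-ed in after htonl() because the kernel keeps fl6_flowlabel pre-positioned in network byte order. A sketch of the same packing, assuming for readability that the flow label is held in host byte order:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Pack version / traffic class / flow label into the first IPv6 header word. */
static uint32_t ipv6_first_word(uint8_t tclass, uint32_t flowlabel)
{
	return htonl((6u << 28) | ((uint32_t)tclass << 20) | (flowlabel & 0xFFFFF));
}

int main(void)
{
	uint32_t w = ipv6_first_word(0x28, 0x12345);

	printf("on-wire word: 0x%08x\n", (unsigned)ntohl(w));	/* 0x62812345 */
	return 0;
}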
Example #4
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int tclass = 0;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/* Allow local fragmentation. */
	if (ipfragok)
		skb->local_df = 1;

	/*
	 *	Fill in the IPv6 header
	 */
	if (np) {
		tclass = np->tclass;
		hlimit = np->hop_limit;
	}
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
				dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
Example #5
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (!skb->local_df) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frags(skb)) {
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				truesizes += frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
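In the slow path above, mtu already has the unfragmentable headers and the fragment header subtracted, every non-final fragment is trimmed down to a multiple of 8 bytes, and IP6_MF is set on all fragments but the last. A small sketch of that offset/length bookkeeping, with an assumed payload size and per-fragment budget:

#include <stdio.h>

int main(void)
{
	int left = 2800;	/* assumed payload still to send */
	int mtu  = 1400;	/* assumed per-fragment data budget, headers already subtracted */
	int offset = 0;

	while (left > 0) {
		int len = left;

		if (len > mtu)
			len = mtu;
		if (len < left)		/* not the last fragment: keep it 8-byte aligned */
			len &= ~7;

		printf("frag: offset=%d len=%d MF=%d\n",
		       offset, len, (left - len) > 0);
		offset += len;
		left   -= len;
	}
	return 0;
}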
Example #6
/*
 *	All outgoing AX.25 I frames pass via this routine. Therefore this is
 *	where the fragmentation of frames takes place. If fragment is set to
 *	zero then we are not allowed to do fragmentation, even if the frame
 *	is too large.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}
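The segment counter above starts at skb->len / paclen (one less when the length is an exact multiple) and is written into each fragment header before being decremented, so the first fragment carries the highest number together with AX25_SEG_FIRST and the last carries zero. A sketch of just that numbering, with made-up lengths:

#include <stdio.h>

int main(void)
{
	int len = 700, paclen = 256;	/* assumed payload and fragment sizes */
	int fragno = len / paclen;	/* 2 */
	int first = 1;

	if (len % paclen == 0)
		fragno--;

	while (len > 0) {
		int chunk = (paclen > len) ? len : paclen;

		printf("segment: len=%d fragno=%d%s\n",
		       chunk, fragno, first ? " FIRST" : "");
		fragno--;
		first = 0;
		len -= chunk;
	}
	return 0;	/* prints 256/2 FIRST, 256/1, 188/0 */
}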
Example #7
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu,unsigned int flags)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb,fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk,skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
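The gso_size set above is the largest multiple of 8 that fits in one fragment once the IPv6 header chain and the fragment header are accounted for. Worked through with assumed numbers (a 1500-byte MTU and a bare 40-byte IPv6 header, no extension headers):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;		/* assumed link MTU */
	unsigned int fragheaderlen = 40;	/* IPv6 header, no extension headers assumed */
	unsigned int frag_hdr_len = 8;		/* sizeof(struct frag_hdr) */

	unsigned int gso_size = (mtu - fragheaderlen - frag_hdr_len) & ~7u;

	printf("gso_size = %u\n", gso_size);	/* 1448 */
	return 0;
}

Example #8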
static inline int ip_local_deliver_finish(struct sk_buff *skb)
{
	int ihl = skb->nh.iph->ihl*4;

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_local_deliver(skb);
#endif /*CONFIG_NETFILTER_DEBUG*/

	__skb_pull(skb, ihl);

#ifdef CONFIG_NETFILTER
	/* Free reference early: we don't need it any more, and it may
           hold ip_conntrack module loaded indefinitely. */
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif /*CONFIG_NETFILTER*/

        /* Point into the IP datagram, just past the header. */
        skb->h.raw = skb->data;

	{
		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
		int protocol = skb->nh.iph->protocol;
		int hash = protocol & (MAX_INET_PROTOS - 1);
		struct sock *raw_sk = raw_v4_htable[hash];
		struct inet_protocol *ipprot;
		int flag;

		/* If there may be a raw socket we must check - if not we
		 * don't care
		 */
		if(raw_sk != NULL)
			raw_sk = raw_v4_input(skb, skb->nh.iph, hash);

		ipprot = (struct inet_protocol *) inet_protos[hash];
		flag = 0;
		if(ipprot != NULL) {
			if(raw_sk == NULL &&
			   ipprot->next == NULL &&
			   ipprot->protocol == protocol) {
				int ret;

				/* Fast path... */
				ret = ipprot->handler(skb);

				return ret;
			} else {
				flag = ip_run_ipprot(skb, skb->nh.iph, ipprot, (raw_sk != NULL));
			}
		}

		/* All protocols checked.
		 * If this packet was a broadcast, we may *not* reply to it, since that
		 * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
		 * ICMP reply messages get queued up for transmission...)
		 */
		if(raw_sk != NULL) {	/* Shift to last raw user */
			raw_rcv(raw_sk, skb);
			sock_put(raw_sk);
		} else if (!flag) {		/* Free and report errors */
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);	
			kfree_skb(skb);
		}
	}

	return 0;
}
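Example #9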
static inline int ip_rcv_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = skb->nh.iph;

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */ 
	if (skb->dst == NULL) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
			goto drop; 
	}

#ifdef CONFIG_NET_CLS_ROUTE
	if (skb->dst->tclassid) {
		struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
		u32 idx = skb->dst->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes+=skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes+=skb->len;
	}
#endif

	if (iph->ihl > 5) {
		struct ip_options *opt;

		/* It looks as overkill, because not all
		   IP options require packet mangling.
		   But it is the easiest for now, especially taking
		   into account that combination of IP options
		   and running sniffer is extremely rare condition.
		                                      --ANK (980813)
		*/

		if (skb_cow(skb, skb_headroom(skb)))
			goto drop;
		iph = skb->nh.iph;

		if (ip_options_compile(NULL, skb))
			goto inhdr_error;

		opt = &(IPCB(skb)->opt);
		if (opt->srr) {
			struct in_device *in_dev = in_dev_get(dev);
			if (in_dev) {
				if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
					if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
						printk(KERN_INFO "source route option %u.%u.%u.%u -> %u.%u.%u.%u\n",
						       NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
					in_dev_put(in_dev);
					goto drop;
				}
				in_dev_put(in_dev);
			}
			if (ip_options_rcv_srr(skb))
				goto drop;
		}
	}

	return skb->dst->input(skb);

inhdr_error:
	IP_INC_STATS_BH(IpInHdrErrors);
drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}
Example #10
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct iphdr *iph = ip_hdr(skb);
    struct nf_bridge_info *nf_bridge = skb->nf_bridge;
    int err;

    if (nf_bridge->mask & BRNF_PKT_TYPE) {
        skb->pkt_type = PACKET_OTHERHOST;
        nf_bridge->mask ^= BRNF_PKT_TYPE;
    }
    nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
    if (dnat_took_place(skb)) {
        if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
            struct rtable *rt;
            struct flowi fl = {
                .nl_u = {
                    .ip4_u = {
                         .daddr = iph->daddr,
                         .saddr = 0,
                         .tos = RT_TOS(iph->tos) },
                },
                .proto = 0,
            };
            struct in_device *in_dev = in_dev_get(dev);

            /* If err equals -EHOSTUNREACH the error is due to a
             * martian destination or due to the fact that
             * forwarding is disabled. For most martian packets,
             * ip_route_output_key() will fail. It won't fail for 2 types of
             * martian destinations: loopback destinations and destination
             * 0.0.0.0. In both cases the packet will be dropped because the
             * destination is the loopback device and not the bridge. */
            if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                goto free_skb;

            if (!ip_route_output_key(&init_net, &rt, &fl)) {
                /* - Bridged-and-DNAT'ed traffic doesn't
                 *   require ip_forwarding. */
                if (((struct dst_entry *)rt)->dev == dev) {
                    skb->dst = (struct dst_entry *)rt;
                    goto bridged_dnat;
                }
                /* we are sure that forwarding is disabled, so printing
                 * this message is no problem. Note that the packet could
                 * still have a martian destination address, in which case
                 * the packet could be dropped even if forwarding were enabled */
                __br_dnat_complain();
                dst_release((struct dst_entry *)rt);
            }
free_skb:
            kfree_skb(skb);
            return 0;
        } else {
            if (skb->dst->dev == dev) {
bridged_dnat:
                /* Tell br_nf_local_out this is a
                 * bridged frame */
                nf_bridge->mask |= BRNF_BRIDGED_DNAT;
                skb->dev = nf_bridge->physindev;
                nf_bridge_push_encap_header(skb);
                NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
                           skb, skb->dev, NULL,
                           br_nf_pre_routing_finish_bridge,
                           1);
                return 0;
            }
            memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
            skb->pkt_type = PACKET_HOST;
        }
    } else {
Example #11
/**
 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
 * @buf: buffer containing message
 * @b_ptr: bearer that message arrived on
 */
void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
	struct tipc_node *n_ptr;
	struct tipc_link *link;
	struct tipc_media_addr media_addr;
	struct sk_buff *rbuf;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);
	u32 signature = msg_node_sig(msg);
	int addr_mismatch;
	int link_fully_up;

	media_addr.broadcast = 1;
	b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg));
	kfree_skb(buf);

	/* Ensure message from node is valid and communication is permitted */
	if (net_id != tipc_net_id)
		return;
	if (media_addr.broadcast)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr) {
		if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
			disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
		return;
	}
	if (!tipc_in_scope(dest, tipc_own_addr))
		return;
	if (!tipc_in_scope(b_ptr->link_req->domain, orig))
		return;

	/* Locate structure corresponding to requesting node */
	n_ptr = tipc_node_find(orig);
	if (!n_ptr) {
		n_ptr = tipc_node_create(orig);
		if (!n_ptr)
			return;
	}
	tipc_node_lock(n_ptr);

	/* Prepare to validate requesting node's signature and media address */
	link = n_ptr->links[b_ptr->identity];
	addr_mismatch = (link != NULL) &&
		memcmp(&link->media_addr, &media_addr, sizeof(media_addr));

	/*
	 * Ensure discovery message's signature is correct
	 *
	 * If signature is incorrect and there is no working link to the node,
	 * accept the new signature but invalidate all existing links to the
	 * node so they won't re-activate without a new discovery message.
	 *
	 * If signature is incorrect and the requested link to the node is
	 * working, accept the new signature. (This is an instance of delayed
	 * rediscovery, where a link endpoint was able to re-establish contact
	 * with its peer endpoint on a node that rebooted before receiving a
	 * discovery message from that node.)
	 *
	 * If signature is incorrect and there is a working link to the node
	 * that is not the requested link, reject the request (must be from
	 * a duplicate node).
	 */
	if (signature != n_ptr->signature) {
		if (n_ptr->working_links == 0) {
			struct tipc_link *curr_link;
			int i;

			for (i = 0; i < MAX_BEARERS; i++) {
				curr_link = n_ptr->links[i];
				if (curr_link) {
					memset(&curr_link->media_addr, 0,
					       sizeof(media_addr));
					tipc_link_reset(curr_link);
				}
			}
			addr_mismatch = (link != NULL);
		} else if (tipc_link_is_up(link) && !addr_mismatch) {
			/* delayed rediscovery */
		} else {
			disc_dupl_alert(b_ptr, orig, &media_addr);
			tipc_node_unlock(n_ptr);
			return;
		}
		n_ptr->signature = signature;
	}

	/*
	 * Ensure requesting node's media address is correct
	 *
	 * If media address doesn't match and the link is working, reject the
	 * request (must be from a duplicate node).
	 *
	 * If media address doesn't match and the link is not working, accept
	 * the new media address and reset the link to ensure it starts up
	 * cleanly.
	 */
	if (addr_mismatch) {
		if (tipc_link_is_up(link)) {
			disc_dupl_alert(b_ptr, orig, &media_addr);
			tipc_node_unlock(n_ptr);
			return;
		} else {
			memcpy(&link->media_addr, &media_addr,
			       sizeof(media_addr));
			tipc_link_reset(link);
		}
	}

	/* Create a link endpoint for this bearer, if necessary */
	if (!link) {
		link = tipc_link_create(n_ptr, b_ptr, &media_addr);
		if (!link) {
			tipc_node_unlock(n_ptr);
			return;
		}
	}

	/* Accept discovery message & send response, if necessary */
	link_fully_up = link_working_working(link);

	if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
		if (rbuf) {
			tipc_bearer_send(b_ptr, rbuf, &media_addr);
			kfree_skb(rbuf);
		}
	}

	tipc_node_unlock(n_ptr);
}
Example #12
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct nlattr *attr, int len, bool keep_skb)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1) {
		if (keep_skb)
			skb = skb_clone(skb, GFP_ATOMIC);

		do_output(dp, skb, prev_port);
	} else if (!keep_skb)
		consume_skb(skb);

	return 0;
}
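The prev_port handling above exists so the common single-output case never clones: an output is only executed, on a clone, once the loop sees a later action, and the final output consumes the original buffer. The same pattern reduced to a generic loop over an array of output actions; emit(), clone_buf() and struct buf are stand-ins, not Open vSwitch APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

/* Stand-ins for do_output()/skb_clone(); they just print and free. */
static void emit(struct buf *b, int port, int is_clone)
{
	if (!b)
		return;
	printf("output '%s' to port %d%s\n", b->data, port,
	       is_clone ? " (clone)" : "");
	free(b);
}

static struct buf *clone_buf(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

int main(void)
{
	int outputs[] = { 1, 4, 7 };	/* assumed list of output actions */
	int n = 3, prev_port = -1;
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	strcpy(b->data, "packet");

	for (int i = 0; i < n; i++) {
		if (prev_port != -1)	/* another action follows: send a clone */
			emit(clone_buf(b), prev_port, 1);
		prev_port = outputs[i];
	}
	if (prev_port != -1)		/* the last output takes the original */
		emit(b, prev_port, 0);
	return 0;
}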
Example #13
static ssize_t schar_write_3(struct file *file, const char *buf, size_t count,
			   loff_t *offset)
{
  //  struct sock *sk;
  // char *sbuf;
  // char *pnt2;
  // unsigned char *addr;
  int len;
  int err;
  // int i;
  static struct net_device *dev;
  static struct sk_buff *skb;
  static unsigned short proto=0;
  // printk(KERN_INFO " length %d \n",count);

  // sbuf=kmalloc(9000,GFP_KERNEL);
  len=count;
  dev=dev_get_by_name("eth3");
  err=-ENODEV;
  if (dev == NULL)
   goto out_unlock;
            
/*
 *      You may not queue a frame bigger than the mtu. This is the lowest level
 *      raw protocol and you must do your own fragmentation at this level.
*/
                
  err = -EMSGSIZE;
  if(len>dev->mtu+dev->hard_header_len)
  goto out_unlock;
     
  err = -ENOBUFS;
  //  skb = sock_wmalloc(sk, len+dev->hard_header_len+15, 0, GFP_KERNEL);
  skb=dev_alloc_skb(len+dev->hard_header_len+15);   
/*
 *      If the write buffer is full, then tough. At this level the user gets to
 *      deal with the problem - do your own algorithmic backoffs. That's far
 *      more flexible.
*/
              
  if (skb == NULL) 
  goto out_unlock;
     
/*
*      Fill it in 
*/
              
/* FIXME: Save some space for broken drivers that write a
* hard header at transmission time by themselves. PPP is the
* notable one here. This should really be fixed at the driver level.
*/
   skb_reserve(skb,(dev->hard_header_len+15)&~15);
   skb->nh.raw = skb->data;
   proto=htons(ETH_P_ALL);
   /*     	if (dev->hard_header) {
		int res;
		err = -EINVAL;
                addr=NULL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
			skb->tail = skb->data;
			skb->len = 0;
			} */
        			
   skb->tail = skb->data;
   skb->len = 0;

/* Try to align data part correctly */
			/*      if (dev->hard_header) {
      skb->data -= dev->hard_header_len;
      skb->tail -= dev->hard_header_len;
      } */
 			
  
     //  printk(KERN_INFO " header length %d  \n",dev->hard_header_len);
/* Returns -EFAULT on error */
   //  err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	
    err = copy_from_user(skb_put(skb,len),buf, count);
   // err = memcpy_fromio(skb_put(skb,len),sbuf,len);
   // printk(KERN_INFO " lsd: len count %d %d %02x  \n",len,count,*(skb->data+98)&0xff);
   skb->protocol = htons(ETH_P_ALL);
   skb->dev = dev;
   skb->priority = 0;
   // skb->pkt_type=PACKET_MR_PROMISC;
   skb->ip_summed=CHECKSUM_UNNECESSARY;
   if (err)
   goto out_free;
     
   err = -ENETDOWN;
   if (!(dev->flags & IFF_UP))
   goto out_free;
     
/*
*      Now send it
*/
     
   dev_queue_xmit(skb);
   dev_put(dev); 
   // printk(KERN_INFO " lsd: len count %d %d %02x  \n",len,count,*(skb->data+98)&0xff);

   // kfree(sbuf);
   proc_tpackets_3=proc_tpackets_3+1;
   proc_tbytesL_3=proc_tbytesL_3+len;
   if(proc_tbytesL_3>1000000000){
      proc_tbytesL_3=proc_tbytesL_3-1000000000;
      proc_tbytesH_3=proc_tbytesH_3+1;
   } 
   return count;
     
   out_free:
     kfree_skb(skb);
   out_unlock:
     // if (dev)dev_put(dev);
      // kfree(sbuf);           
  return -EFAULT;
}
Example #14
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}


		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
Example #15
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
	dst_link_failure(skb);
	kfree_skb(skb);
}
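Example #16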
/*
 * 	Main IP Receive routine.
 */ 
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct iphdr *iph;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	IP_INC_STATS_BH(IpInReceives);

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = skb->nh.iph;

	/*
	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error; 

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = skb->nh.iph;

#ifndef CONFIG_CSUM_UNNECESSARY
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto inhdr_error; 
#endif

	{
		__u32 len = ntohs(iph->tot_len); 
		if (skb->len < len || len < (iph->ihl<<2))
			goto inhdr_error;

		/* Our transport medium may have padded the buffer out. Now we know it
		 * is IP we can trim to the true length of the frame.
		 * Note this now means skb->len holds ntohs(iph->tot_len).
		 */
		if (skb->len > len) {
			__pskb_trim(skb, len);
			if (skb->ip_summed == CHECKSUM_HW)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
		       ip_rcv_finish);

inhdr_error:
	IP_INC_STATS_BH(IpInHdrErrors);
drop:
        kfree_skb(skb);
out:
        return NET_RX_DROP;
}
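ip_rcv applies the sanity checks listed in the comment: a header of at least 5 words, version 4, a correct header checksum, and a total length that is neither larger than the buffer nor smaller than the header. A standalone sketch of the same checks over a raw byte buffer; the checksum helper is the standard ones' complement sum, not the kernel's ip_fast_csum():

#include <stdio.h>
#include <stdint.h>

/* Ones' complement checksum over the IPv4 header; a header carrying a
 * correct checksum field sums to 0. */
static uint16_t ip_checksum(const uint8_t *hdr, int ihl)
{
	uint32_t sum = 0;

	for (int i = 0; i < ihl * 4; i += 2)
		sum += (uint32_t)((hdr[i] << 8) | hdr[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* The four checks from the comment in ip_rcv. */
static int ipv4_header_ok(const uint8_t *buf, size_t buflen)
{
	if (buflen < 20)
		return 0;

	uint8_t version = buf[0] >> 4;
	uint8_t ihl = buf[0] & 0x0f;

	if (ihl < 5 || version != 4)
		return 0;
	if (buflen < (size_t)ihl * 4)
		return 0;
	if (ip_checksum(buf, ihl) != 0)
		return 0;

	uint16_t tot_len = (uint16_t)((buf[2] << 8) | buf[3]);

	if (buflen < tot_len || tot_len < (uint16_t)(ihl << 2))
		return 0;
	return 1;
}

int main(void)
{
	uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x14 };	/* version 4, ihl 5, tot_len 20 */
	uint16_t csum = ip_checksum(hdr, 5);

	hdr[10] = csum >> 8;				/* checksum field, big endian */
	hdr[11] = csum & 0xff;
	printf("header ok: %d\n", ipv4_header_ok(hdr, sizeof(hdr)));
	return 0;
}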
Example #17
/*
 *	Create an arp packet. If (dest_hw == NULL), we create a broadcast
 *	message.
 */
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
			   struct net_device *dev, __be32 src_ip,
			   const unsigned char *dest_hw,
			   const unsigned char *src_hw,
			   const unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	Allocate a buffer
	 */

	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (src_hw == NULL)
		src_hw = dev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	 */
	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
		goto out;

	/*
	 * Fill out the arp protocol part.
	 *
	 * The arp hardware type should match the device type, except for FDDI,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */
	/*
	 *	Exceptions everywhere. AX.25 uses the AX.25 PID value not the
	 *	DIX code for the protocol. Make these device structure fields.
	 */
	switch (dev->type) {
	default:
		arp->ar_hrd = htons(dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		break;

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
	case ARPHRD_AX25:
		arp->ar_hrd = htons(ARPHRD_AX25);
		arp->ar_pro = htons(AX25_P_IP);
		break;

#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
	case ARPHRD_NETROM:
		arp->ar_hrd = htons(ARPHRD_NETROM);
		arp->ar_pro = htons(AX25_P_IP);
		break;
#endif
#endif

#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
	case ARPHRD_FDDI:
		arp->ar_hrd = htons(ARPHRD_ETHER);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
	case ARPHRD_IEEE802_TR:
		arp->ar_hrd = htons(ARPHRD_IEEE802);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
	}

	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &src_ip, 4);
	arp_ptr += 4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
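After the arphdr, arp_create lays the payload out as sender hardware address, sender protocol address, target hardware address, target protocol address - the RFC 826 wire format. A sketch of the resulting offsets for the Ethernet/IPv4 case (6-byte hardware addresses, 4-byte protocol addresses, 8-byte arphdr):

#include <stdio.h>

int main(void)
{
	int hln = 6, pln = 4;	/* Ethernet hardware / IPv4 protocol address lengths */
	int arphdr_len = 8;	/* ar_hrd, ar_pro, ar_hln, ar_pln, ar_op */
	int off = arphdr_len;

	printf("sender hw at offset %d (%d bytes)\n", off, hln); off += hln;
	printf("sender ip at offset %d (%d bytes)\n", off, pln); off += pln;
	printf("target hw at offset %d (%d bytes)\n", off, hln); off += hln;
	printf("target ip at offset %d (%d bytes)\n", off, pln); off += pln;
	printf("total ARP packet: %d bytes\n", off);	/* 28 */
	return 0;
}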
Example #18
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
    /* For receiving */
    struct nlattr *na;
    struct net_device *hsr_dev;

    /* For sending */
    struct sk_buff *skb_out;
    void *msg_head;
    struct hsr_priv *hsr;
    struct hsr_port *port;
    unsigned char hsr_node_addr_b[ETH_ALEN];
    int hsr_node_if1_age;
    u16 hsr_node_if1_seq;
    int hsr_node_if2_age;
    u16 hsr_node_if2_seq;
    int addr_b_ifindex;
    int res;

    if (!info)
        goto invalid;

    na = info->attrs[HSR_A_IFINDEX];
    if (!na)
        goto invalid;
    na = info->attrs[HSR_A_NODE_ADDR];
    if (!na)
        goto invalid;

    hsr_dev = __dev_get_by_index(genl_info_net(info),
                                 nla_get_u32(info->attrs[HSR_A_IFINDEX]));
    if (!hsr_dev)
        goto invalid;
    if (!is_hsr_master(hsr_dev))
        goto invalid;


    /* Send reply */

    skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb_out) {
        res = -ENOMEM;
        goto fail;
    }

    msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
                           info->snd_seq, &hsr_genl_family, 0,
                           HSR_C_SET_NODE_STATUS);
    if (!msg_head) {
        res = -ENOMEM;
        goto nla_put_failure;
    }

    res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
    if (res < 0)
        goto nla_put_failure;

    hsr = netdev_priv(hsr_dev);
    res = hsr_get_node_data(hsr,
                            (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
                            hsr_node_addr_b,
                            &addr_b_ifindex,
                            &hsr_node_if1_age,
                            &hsr_node_if1_seq,
                            &hsr_node_if2_age,
                            &hsr_node_if2_seq);
    if (res < 0)
        goto nla_put_failure;

    res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
                  nla_data(info->attrs[HSR_A_NODE_ADDR]));
    if (res < 0)
        goto nla_put_failure;

    if (addr_b_ifindex > -1) {
        res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
                      hsr_node_addr_b);
        if (res < 0)
            goto nla_put_failure;

        res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex);
        if (res < 0)
            goto nla_put_failure;
    }

    res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
    if (res < 0)
        goto nla_put_failure;
    res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
    if (res < 0)
        goto nla_put_failure;
    rcu_read_lock();
    port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
    if (port)
        res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
                          port->dev->ifindex);
    rcu_read_unlock();
    if (res < 0)
        goto nla_put_failure;

    res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
    if (res < 0)
        goto nla_put_failure;
    res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
    if (res < 0)
        goto nla_put_failure;
    rcu_read_lock();
    port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
    if (port)
        res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
                          port->dev->ifindex);
    rcu_read_unlock();
    if (res < 0)
        goto nla_put_failure;

    genlmsg_end(skb_out, msg_head);
    genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

    return 0;

invalid:
    netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
    return 0;

nla_put_failure:
    kfree_skb(skb_out);
    /* Fall through */

fail:
    return res;
}
Example #19
/*
 * Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
	kfree_skb(skb);
}
Example #20
/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
    /* For receiving */
    struct nlattr *na;
    struct net_device *hsr_dev;

    /* For sending */
    struct sk_buff *skb_out;
    void *msg_head;
    struct hsr_priv *hsr;
    void *pos;
    unsigned char addr[ETH_ALEN];
    int res;

    if (!info)
        goto invalid;

    na = info->attrs[HSR_A_IFINDEX];
    if (!na)
        goto invalid;

    hsr_dev = __dev_get_by_index(genl_info_net(info),
                                 nla_get_u32(info->attrs[HSR_A_IFINDEX]));
    if (!hsr_dev)
        goto invalid;
    if (!is_hsr_master(hsr_dev))
        goto invalid;


    /* Send reply */

    skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb_out) {
        res = -ENOMEM;
        goto fail;
    }

    msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
                           info->snd_seq, &hsr_genl_family, 0,
                           HSR_C_SET_NODE_LIST);
    if (!msg_head) {
        res = -ENOMEM;
        goto nla_put_failure;
    }

    res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
    if (res < 0)
        goto nla_put_failure;

    hsr = netdev_priv(hsr_dev);

    rcu_read_lock();
    pos = hsr_get_next_node(hsr, NULL, addr);
    while (pos) {
        res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
        if (res < 0) {
            rcu_read_unlock();
            goto nla_put_failure;
        }
        pos = hsr_get_next_node(hsr, pos, addr);
    }
    rcu_read_unlock();

    genlmsg_end(skb_out, msg_head);
    genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

    return 0;

invalid:
    netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
    return 0;

nla_put_failure:
    kfree_skb(skb_out);
    /* Fall through */

fail:
    return res;
}
Example #21
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		inet->cork.dst = &rt->u.dst;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rt6_info *)inet->cork.dst;
		fl = &inet->cork.fl;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
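The maxfraglen computed near the top of the function, ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr), caps how far a queued packet may grow while keeping its fragmentable part 8-byte aligned and leaving room for a fragment header. Worked through with assumed values (1500-byte MTU, 40-byte IPv6 header with no extension headers):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;		/* assumed path MTU */
	unsigned int fragheaderlen = 40;	/* IPv6 header only, no extension headers assumed */
	unsigned int frag_hdr_len = 8;		/* sizeof(struct frag_hdr) */

	unsigned int maxfraglen =
		((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr_len;

	printf("maxfraglen = %u\n", maxfraglen);	/* 1488 */
	return 0;
}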
Example #22
static int ipsec_ocf_rcv_cb(struct cryptop *crp)
{
	struct ipsec_rcv_state *irs =
		(struct ipsec_rcv_state *)crp->crp_opaque;
	struct iphdr *newiph;
	unsigned orig_len, decomp_len;
	struct cryptodesc *crdc = NULL;

	KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb\n");

	if (irs == NULL) {
		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: "
			    "NULL irs in callback\n");
		return 0;
	}

	/*
	 * we must update the state before returning to the state machine.
	 * if we have an error,  terminate the processing by moving to the DONE
	 * state
	 */

	irs->state = IPSEC_RSM_DONE; /* assume it went badly */

	if (crp->crp_etype) {
		ptrdiff_t ptr_delta;

		if (crp->crp_etype == EAGAIN) {
			/* Session has been migrated. Store the new session id and retry */
			KLIPS_PRINT(debug_rcv,
				    "klips_debug:ipsec_ocf_rcv_cb: crypto session migrated\n");
			irs->ipsp->ocf_cryptoid = crp->crp_sid;
			/* resubmit request */
			if (crypto_dispatch(crp) == 0)
				return 0;
			/* resubmit failed */
		}

		KLIPS_PRINT(debug_rcv, "klips_debug:ipsec_ocf_rcv_cb: "
			    "error in processing 0x%x\n", crp->crp_etype);

		switch (irs->ipsp->ips_said.proto) {
		case IPPROTO_COMP:
			/*
			 * we restore the previous skb on error and pretend nothing
			 * happened, just no compression
			 */
			ptr_delta = irs->pre_ipcomp_skb->data - irs->skb->data;
			irs->authenticator =
				(void*)((char*)irs->authenticator + ptr_delta);
			irs->iph           =
				(void*)((char*)irs->iph           + ptr_delta);

			kfree_skb(irs->skb);
			irs->skb = irs->pre_ipcomp_skb;
			irs->pre_ipcomp_skb = NULL;
			break;
		}

		goto bail;
	}

	switch (irs->ipsp->ips_said.proto) {
	case IPPROTO_ESP:
		/* ESP, process it */
		if (ipsec_rcv_esp_post_decrypt(irs) == IPSEC_RCV_OK) {
			/* this one came up good, set next state */
			irs->state = IPSEC_RSM_DECAP_CONT;
		}
		break;

	case IPPROTO_AH:
		/* AH post processing, put back fields we had to zero */
		if (lsw_ip_hdr_version(irs) == 4) {
			lsw_ip4_hdr(irs)->ttl      = irs->ttl;
			lsw_ip4_hdr(irs)->check    = irs->check;
			lsw_ip4_hdr(irs)->frag_off = irs->frag_off;
			lsw_ip4_hdr(irs)->tos      = irs->tos;
		}
		irs->state         = IPSEC_RSM_AUTH_CHK;

		/* pull up the IP header again after processing */
		skb_pull(irs->skb,
			 ((unsigned char *)irs->protostuff.ahstuff.ahp) -
			 ((unsigned char *)irs->iph));

		break;

	case IPPROTO_COMP:
		crdc = crp->crp_desc;

		KLIPS_PRINT(debug_rcv, "comp before adjustments:\n");
		KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph);

		orig_len = irs->skb->len - sizeof(struct ipcomphdr);
		decomp_len = crp->crp_olen;

		newiph = (struct iphdr*)
			((char*)irs->iph + sizeof(struct ipcomphdr));

		KLIPS_PRINT(debug_rcv,
			    "comp results: olen: %u, inject: %u (len=%d) iph->totlen=%u\n",
			    crp->crp_olen, crdc->crd_inject, decomp_len,
			    ntohs(newiph->tot_len));

		/*
		 * move the ip header to consume room previously taken by
		 * the ipcomp header
		 */
		skb_pull(irs->skb, sizeof(struct ipcomphdr));
		memmove(newiph, irs->iph, irs->iphlen);

		/* adjust the iph pointer to point to the header we decoded */
		irs->iph = newiph;

		skb_set_network_header(irs->skb,
				       ipsec_skb_offset(irs->skb,
							((unsigned char *)
							skb_network_header(irs->skb))
								  +
								  sizeof(struct
									 ipcomphdr)));
		skb_set_transport_header(irs->skb,
					 ipsec_skb_offset(irs->skb,
							  ((unsigned char *)
							  skb_transport_header(irs->skb))
								    +
								    sizeof(struct
									    ipcomphdr)));

		if (lsw_ip_hdr_version(irs) == 6) {
			lsw_ip6_hdr(irs)->nexthdr  = irs->next_header;
		} else {
			lsw_ip4_hdr(irs)->protocol = irs->next_header;
			lsw_ip4_hdr(irs)->tot_len = htons(
				irs->iphlen + decomp_len);
			lsw_ip4_hdr(irs)->check = 0;
			lsw_ip4_hdr(irs)->check = ip_fast_csum(irs->iph, lsw_ip4_hdr(
								       irs)->ihl);
		}

		KLIPS_PRINT(debug_rcv, "comp after len adjustments:\n");
		KLIPS_IP_PRINT(debug_rcv & DB_TN_XMIT, irs->iph);

		/* Update skb length/tail by "putting" the growth */
		safe_skb_put(irs->skb, decomp_len - crp->crp_olen);

		/* set the new header in the skb */
		skb_set_network_header(irs->skb,
				       ipsec_skb_offset(irs->skb, irs->iph));
		KLIPS_IP_PRINT(debug_rcv & DB_RX_PKTRX, ip_hdr(irs->skb));

		/* release the backup copy */
		if (irs->pre_ipcomp_skb) {
			kfree_skb(irs->pre_ipcomp_skb);
			irs->pre_ipcomp_skb = NULL;
		}

		/* IPcomp finished, continue processing */
		irs->state = IPSEC_RSM_DECAP_CONT;
		break;
	}

bail:
	crypto_freereq(crp);
	crp = NULL;
	ipsec_ocf_queue_task(ipsec_rsm, irs);
	return 0;
}
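
The EAGAIN branch above shows a recurring OCF callback pattern: when the crypto session has been migrated, the callback stores the new session id and resubmits the request before giving up. A stripped-down sketch of that pattern follows; example_request and example_state are hypothetical stand-ins for struct cryptop and struct ipsec_rcv_state, not KLIPS types.

#include <errno.h>

struct example_request { int error; unsigned long long session_id; };
struct example_state   { unsigned long long session_id; };

/* Hedged sketch: retry once on a migrated session, otherwise report
 * the error back to the state machine.
 */
static int example_crypto_cb(struct example_request *req,
			     struct example_state *st,
			     int (*resubmit)(struct example_request *))
{
	if (req->error == EAGAIN) {
		/* the driver moved the session: remember the new id and retry */
		st->session_id = req->session_id;
		if (resubmit(req) == 0)
			return 0;	/* resubmitted; wait for the next callback */
	}
	return req->error ? -req->error : 0;
}
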
Example No. 23
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on
	 *	RA packets: we push them to user level AS IS
	 *	without any WARRANTY that the application will be
	 *	able to interpret them. The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything with it.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
	    !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling the hop limit is delayed until after the skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
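
Before mangling the header, ip6_forward() makes two per-packet sanity checks: the hop limit must still allow another hop, and the packet must fit the path MTU (clamped to the IPv6 minimum of 1280 bytes). Below is a small sketch of just that decision logic, with verdict names of our own choosing.

/* Hedged sketch of the forwarding checks above; 1280 is IPV6_MIN_MTU
 * per RFC 2460.
 */
enum fwd_verdict { FWD_OK, FWD_HOP_LIMIT_EXCEEDED, FWD_TOO_BIG };

static enum fwd_verdict check_forwardable(unsigned int hop_limit,
					  unsigned int pkt_len,
					  unsigned int path_mtu)
{
	if (hop_limit <= 1)
		return FWD_HOP_LIMIT_EXCEEDED;	/* send ICMPV6_TIME_EXCEED */
	if (path_mtu < 1280)
		path_mtu = 1280;		/* clamp to the IPv6 minimum MTU */
	if (pkt_len > path_mtu)
		return FWD_TOO_BIG;		/* send ICMPV6_PKT_TOOBIG */
	return FWD_OK;
}
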
Example No. 24
static int ipsec_ocf_xmit_cb(struct cryptop *crp)
{
	struct ipsec_xmit_state *ixs =
		(struct ipsec_xmit_state *)crp->crp_opaque;
	struct iphdr *newiph;
	struct ipcomphdr *cmph;
	unsigned orig_len, comp_len;
	struct cryptodesc *crdc = NULL;

	KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
		    "klips_debug:ipsec_ocf_xmit_cb\n");

	if (ixs == NULL) {
		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: "
			    "NULL ixs in callback\n");
		return 0;
	}

	/*
	 * we must update the state before returning to the state machine.
	 * if we have an error,  terminate the processing by moving to the DONE
	 * state
	 */

	ixs->state = IPSEC_XSM_DONE; /* assume bad xmit */
	if (crp->crp_etype) {
		ptrdiff_t ptr_delta;

		if (crp->crp_etype == EAGAIN) {
			/* Session has been migrated. Store the new session id and retry */
			KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
				    "klips_debug:ipsec_ocf_xmit_cb: crypto session migrated\n");
			ixs->ipsp->ocf_cryptoid = crp->crp_sid;
			/* resubmit request */
			if (crypto_dispatch(crp) == 0)
				return 0;
			/* resubmit failed */
		}

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT, "klips_debug:ipsec_ocf_xmit_cb: "
			    "error in processing 0x%x\n",
			    crp->crp_etype);

		switch (ixs->ipsp->ips_said.proto) {
		case IPPROTO_COMP:
			/*
			 * It's ok for compression to fail... we made a clone
			 * of the packet, so we just revert it now...
			 */
			if (!ixs->pre_ipcomp_skb) {
				KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
					    "klips_debug:ipsec_ocf_xmit_cb: "
					    "IPcomp on %u bytes failed, "
					    "but we have no clone!\n",
					    (unsigned int)
					    (lsw_ip_hdr_version(ixs) == 6 ?
					     (ntohs(lsw_ip6_hdr(ixs)->
						    payload_len) +
					      sizeof(struct ipv6hdr))
					     :
					     ntohs(lsw_ip4_hdr(
							   ixs)->tot_len)) -
					    ixs->iphlen);
				/* this is a fail. */
				break;
			}

			KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
				    "klips_debug:ipsec_ocf_xmit_cb: "
				    "IPcomp on %u bytes failed, "
				    "using backup clone.\n",
				    (unsigned int)
				    (lsw_ip_hdr_version(ixs) == 6 ?
				     (ntohs(lsw_ip6_hdr(ixs)->payload_len) +
				      sizeof(struct ipv6hdr)) :
				     ntohs(lsw_ip4_hdr(ixs)->tot_len)) -
				    ixs->iphlen);

			ptr_delta = ixs->pre_ipcomp_skb->data - ixs->skb->data;
			ixs->iph = (void*)((char*)ixs->iph + ptr_delta);

			/*
			 * cannot free it here, because we are under
			 * IRQ, potentially, so queue it for later
			 */
			kfree_skb(ixs->skb);
			ixs->skb = ixs->pre_ipcomp_skb;
			ixs->pre_ipcomp_skb = NULL;

			skb_set_network_header(ixs->skb,
					       ipsec_skb_offset(ixs->skb,
								((void *)
								 skb_network_header(
									 ixs->skb))
								+
								ptr_delta));
			skb_set_transport_header(ixs->skb,
						 ipsec_skb_offset(ixs->skb,
								  ((void *)
								   skb_transport_header(
									   ixs->skb))
								  +
								  ptr_delta));
			KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);

			/* this means we don't compress */
			ixs->state = IPSEC_XSM_CONT;
			break;
		}
		goto bail;
	}

	switch (ixs->ipsp->ips_said.proto) {
	case IPPROTO_ESP:
		/* ESP, nothing to do */
		break;

	case IPPROTO_AH:
		/* AH post processing, put back fields we had to zero */
		if (lsw_ip_hdr_version(ixs) == 4) {
			lsw_ip4_hdr(ixs)->ttl      = ixs->ttl;
			lsw_ip4_hdr(ixs)->check    = ixs->check;
			lsw_ip4_hdr(ixs)->frag_off = ixs->frag_off;
			lsw_ip4_hdr(ixs)->tos      = ixs->tos;
		}
		break;

	case IPPROTO_COMP:
		/* IPcomp fill in the header */
		crdc = crp->crp_desc;

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
			    "klips_debug:ipsec_ocf_xmit_cb: "
			    "after <%s%s%s>, SA:%s:\n",
			    IPS_XFORM_NAME(ixs->ipsp),
			    ixs->sa_len ? ixs->sa_txt : " (error)");
		KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);

		orig_len = (lsw_ip_hdr_version(ixs) == 6 ?
			    (ntohs(lsw_ip6_hdr(ixs)->payload_len) +
			     sizeof(struct ipv6hdr)) :
			    ntohs(lsw_ip4_hdr(ixs)->tot_len)) -
			   ixs->iphlen;
		comp_len = crp->crp_olen;

		if (sysctl_ipsec_debug_ipcomp && sysctl_ipsec_debug_verbose) {
			ipsec_dmp_block("compress after",
					((unsigned char*)ixs->iph) + ixs->iphlen,
					comp_len);
		}

		newiph = (struct iphdr *)
			((char*)ixs->iph - sizeof(struct ipcomphdr));
		cmph = (struct ipcomphdr *)((char*)newiph + ixs->iphlen);

		/* move the ip header to make room for the new ipcomp header */
		memmove(((unsigned char *) ixs->skb->data) -
			sizeof(struct ipcomphdr),
			ixs->skb->data,
			(((unsigned char *) ixs->iph) + ixs->iphlen) -
			((unsigned char *) ixs->skb->data));
		/* DAVIDM check for head room */
		skb_push(ixs->skb, sizeof(struct ipcomphdr));

		ixs->iph = newiph;
		skb_set_network_header(ixs->skb,
				       ipsec_skb_offset(ixs->skb, newiph));
		skb_set_transport_header(ixs->skb,
					 ipsec_skb_offset(ixs->skb,
							  newiph) +
					 ixs->iphlen);

		/* now we can fill in the ipcomp header */
		cmph->ipcomp_nh = ixs->next_header;
		cmph->ipcomp_flags = 0;
		cmph->ipcomp_cpi =
			htons((__u16)(ntohl(ixs->ipsp->ips_said.spi) &
				      0x0000ffff));

		/* update the ip header to reflect the compression */
		if (lsw_ip_hdr_version(ixs) == 6) {
			lsw_ip6_hdr(ixs)->nexthdr = IPPROTO_COMP;
			lsw_ip6_hdr(ixs)->payload_len =
				htons(ixs->iphlen + sizeof(struct ipcomphdr) +
				      comp_len - sizeof(struct ipv6hdr));
		} else {
			lsw_ip4_hdr(ixs)->protocol = IPPROTO_COMP;
			lsw_ip4_hdr(ixs)->tot_len =
				htons(ixs->iphlen + sizeof(struct ipcomphdr) +
				      comp_len);
			lsw_ip4_hdr(ixs)->check = 0;
			lsw_ip4_hdr(ixs)->check =
				ip_fast_csum((char *) ixs->iph,
					     lsw_ip4_hdr(ixs)->ihl);
		}

		/* Update skb length/tail by "unputting" the shrinkage */
		safe_skb_put(ixs->skb, comp_len - orig_len);

		ixs->ipsp->ips_comp_adapt_skip = 0;
		ixs->ipsp->ips_comp_adapt_tries = 0;

		/* release the backup copy */
		if (ixs->pre_ipcomp_skb) {
			kfree_skb(ixs->pre_ipcomp_skb);
			ixs->pre_ipcomp_skb = NULL;
		}

		KLIPS_PRINT(debug_tunnel & DB_TN_XMIT,
			    "klips_debug:ipsec_ocf_xmit_cb: "
			    "after <%s%s%s>, SA:%s:\n",
			    IPS_XFORM_NAME(ixs->ipsp),
			    ixs->sa_len ? ixs->sa_txt : " (error)");
		KLIPS_IP_PRINT(debug_tunnel & DB_TN_XMIT, ixs->iph);
		break;
	}

	/* all good */
	ixs->state = IPSEC_XSM_CONT;

bail:
	crypto_freereq(crp);
	crp = NULL;
	ipsec_ocf_queue_task(ipsec_xsm, ixs);
	return 0;
}
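
The IPcomp branch above derives the 16-bit CPI for the on-wire ipcomphdr from the SA's 32-bit SPI by keeping only its low 16 bits. A tiny userspace sketch of that mapping (the function name is ours):

#include <stdint.h>
#include <arpa/inet.h>	/* htons()/ntohl(); in-kernel code uses the byteorder helpers instead */

/* Hedged sketch: spi_be is the SPI in network byte order, as stored in
 * ips_said.spi; the result is the CPI in network byte order.
 */
static uint16_t ipcomp_cpi_from_spi(uint32_t spi_be)
{
	return htons((uint16_t)(ntohl(spi_be) & 0x0000ffff));
}
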
Example No. 25
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		  int noblock, int flags, int *addr_len)
{
  	struct sk_buff *skb;
  	int copied, err;

  	if (addr_len)
  		*addr_len=sizeof(struct sockaddr_in6);
  
	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

 	copied = skb->len - sizeof(struct udphdr);
  	if (copied > len) {
  		copied = len;
  		msg->msg_flags |= MSG_TRUNC;
  	}

	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;
	  
		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (skb->protocol == htons(ETH_P_IP)) {
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      htonl(0xffff), skb->nh.iph->saddr);
			if (sk->protinfo.af_inet.cmsg_flags)
				ip_cmsg_recv(msg, skb);
		} else {
			memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
			       sizeof(struct in6_addr));

			if (sk->net_pinfo.af_inet6.rxopt.all)
				datagram_recv_ctl(sk, msg, skb);
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
				struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
				sin6->sin6_scope_id = opt->iif;
			}
		}
  	}
	err = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear queue. */
	if (flags&MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->receive_queue.lock);
		if (skb == skb_peek(&sk->receive_queue)) {
			__skb_unlink(skb, &sk->receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* The error for the blocking case is chosen to masquerade
	   as a normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	UDP6_INC_STATS_USER(UdpInErrors);
	goto out_free;
}
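
When the datagram arrived over IPv4, the code above reports the peer as an IPv4-mapped IPv6 address (::ffff:a.b.c.d) via ipv6_addr_set(). A portable userspace sketch of the same layout, with a function name of our own:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

/* Hedged sketch: build ::ffff:a.b.c.d from an IPv4 address in network
 * byte order.  Bytes 0-9 are zero, bytes 10-11 are 0xff, bytes 12-15
 * hold the IPv4 address.
 */
static void map_ipv4_to_ipv6(struct in6_addr *v6, uint32_t v4_be)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4_be, sizeof(v4_be));
}
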
Example No. 26
static struct sk_buff *netlink_build_message(ipq_queue_element_t *e, int *errp)
{
	unsigned char *old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	ipq_packet_msg_t *pm;
	struct nlmsghdr *nlh;

	switch (nlq->peer.copy_mode) {
		size_t copy_range;

		case IPQ_COPY_META:
			size = NLMSG_SPACE(sizeof(*pm));
			data_len = 0;
			break;
		case IPQ_COPY_PACKET:
			copy_range = nlq->peer.copy_range;
			if (copy_range == 0 || copy_range > e->skb->len)
				data_len = e->skb->len;
			else
				data_len = copy_range;
			size = NLMSG_SPACE(sizeof(*pm) + data_len);
			
			break;
		case IPQ_COPY_NONE:
		default:
			*errp = -EINVAL;
			return NULL;
	}
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;
	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pm = NLMSG_DATA(nlh);
	memset(pm, 0, sizeof(*pm));
	pm->packet_id = (unsigned long )e;
	pm->data_len = data_len;
	pm->timestamp_sec = e->skb->stamp.tv_sec;
	pm->timestamp_usec = e->skb->stamp.tv_usec;
	pm->mark = e->skb->nfmark;
	pm->hook = e->info->hook;
	if (e->info->indev) strcpy(pm->indev_name, e->info->indev->name);
	else pm->indev_name[0] = '\0';
	if (e->info->outdev) strcpy(pm->outdev_name, e->info->outdev->name);
	else pm->outdev_name[0] = '\0';
	pm->hw_protocol = e->skb->protocol;
	if (e->info->indev && e->skb->dev) {
		pm->hw_type = e->skb->dev->type;
		if (e->skb->dev->hard_header_parse)
			pm->hw_addrlen =
				e->skb->dev->hard_header_parse(e->skb,
				                               pm->hw_addr);
	}
	if (data_len)
		memcpy(pm->payload, e->skb->data, data_len);
	nlh->nlmsg_len = skb->tail - old_tail;
	NETLINK_CB(skb).dst_groups = 0;
	return skb;
nlmsg_failure:
	if (skb)
		kfree_skb(skb);
	*errp = 0;
	printk(KERN_ERR "ip_queue: error creating netlink message\n");
	return NULL;
}
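
The message is sized up front from the peer's copy mode: metadata only, or metadata plus up to copy_range bytes of packet payload (0 meaning "the whole packet"). Below is a sketch of just that sizing rule using the NLMSG_SPACE() macro; the function and parameter names are ours.

#include <stddef.h>
#include <linux/netlink.h>

/* Hedged sketch: meta_len stands for sizeof(ipq_packet_msg_t). */
static size_t ipq_msg_size(int copy_packet, size_t copy_range,
			   size_t pkt_len, size_t meta_len)
{
	size_t data_len = 0;

	if (copy_packet)
		data_len = (copy_range == 0 || copy_range > pkt_len)
			   ? pkt_len : copy_range;
	return NLMSG_SPACE(meta_len + data_len);
}
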
Example No. 27
/*
 * This function handles user application commands for the driver.
 *
 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and
 * dispatches to the corresponding handler.
 *
 * If the command ID is unknown, -ENOSYS is returned; otherwise the
 * return value of the actual command execution is passed back to the
 * user application.
 *
 * If a message is sent back to user space, IWL_TM_ATTR_SYNC_RSP
 * carries the message and IWL_TM_ATTR_COMMAND must be set to
 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: gnl message fields from the user space
 */
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_trans *trans = priv->trans;
	struct sk_buff *skb;
	unsigned char *rsp_data_ptr = NULL;
	int status = 0, rsp_data_len = 0;
	u32 inst_size = 0, data_size = 0;
	const struct fw_img *img;

	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
	case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
		rsp_data_ptr = (unsigned char *)priv->cfg->name;
		rsp_data_len = strlen(priv->cfg->name);
		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
							rsp_data_len + 20);
		if (!skb) {
			IWL_ERR(priv, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
				IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
		    nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
			    rsp_data_len, rsp_data_ptr))
			goto nla_put_failure;
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_ERR(priv, "Error sending msg : %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
		status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
		if (status)
			IWL_ERR(priv, "Error loading init ucode: %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
		iwl_testmode_cfg_init_calib(priv);
		priv->ucode_loaded = false;
		iwl_trans_stop_device(trans);
		break;

	case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
		status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
		if (status) {
			IWL_ERR(priv,
				"Error loading runtime ucode: %d\n", status);
			break;
		}
		status = iwl_alive_start(priv);
		if (status)
			IWL_ERR(priv,
				"Error starting the device: %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
		iwl_scan_cancel_timeout(priv, 200);
		priv->ucode_loaded = false;
		iwl_trans_stop_device(trans);
		status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
		if (status) {
			IWL_ERR(priv,
				"Error loading WOWLAN ucode: %d\n", status);
			break;
		}
		status = iwl_alive_start(priv);
		if (status)
			IWL_ERR(priv,
				"Error starting the device: %d\n", status);
		break;

	case IWL_TM_CMD_APP2DEV_GET_EEPROM:
		if (priv->eeprom_blob) {
			skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
				priv->eeprom_blob_size + 20);
			if (!skb) {
				IWL_ERR(priv, "Memory allocation fail\n");
				return -ENOMEM;
			}
			if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
					IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
			    nla_put(skb, IWL_TM_ATTR_EEPROM,
				    priv->eeprom_blob_size,
				    priv->eeprom_blob))
				goto nla_put_failure;
			status = cfg80211_testmode_reply(skb);
			if (status < 0)
				IWL_ERR(priv, "Error sending msg : %d\n",
					status);
		} else
			return -ENODATA;
		break;

	case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
		if (!tb[IWL_TM_ATTR_FIXRATE]) {
			IWL_ERR(priv, "Missing fixrate setting\n");
			return -ENOMSG;
		}
		priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
		break;

	case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
		if (!skb) {
			IWL_ERR(priv, "Memory allocation fail\n");
			return -ENOMEM;
		}
		if (!priv->ucode_loaded) {
			IWL_ERR(priv, "uCode has not been loaded\n");
			return -EINVAL;
		} else {
			img = &priv->fw->img[priv->cur_ucode];
			inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
			data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
		}
		if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
		    nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
		    nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
			goto nla_put_failure;
		status = cfg80211_testmode_reply(skb);
		if (status < 0)
			IWL_ERR(priv, "Error sending msg : %d\n", status);
		break;

	default:
		IWL_ERR(priv, "Unknown testmode driver command ID\n");
		return -ENOSYS;
	}
	return status;

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
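
Several branches above follow the same reply pattern: allocate a testmode reply skb, add netlink attributes, and send it back with cfg80211_testmode_reply(). A compact sketch of that pattern is below; it assumes CONFIG_NL80211_TESTMODE, and the helper name is ours.

#include <linux/skbuff.h>
#include <net/cfg80211.h>
#include <net/netlink.h>

/* Hedged sketch: reply to a testmode command with a single u32 attribute. */
static int example_testmode_reply_u32(struct wiphy *wiphy, int attr, u32 val)
{
	struct sk_buff *skb;

	skb = cfg80211_testmode_alloc_reply_skb(wiphy,
						nla_total_size(sizeof(u32)));
	if (!skb)
		return -ENOMEM;
	if (nla_put_u32(skb, attr, val)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	return cfg80211_testmode_reply(skb);
}
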
Example No. 28
/*
 * free a message buffer
 */
void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb)
{
	kfree_skb(dmb);
}
Example No. 29
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);
	
	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT do any processing on
	 *	RA packets: we push them to user level AS IS
	 *	without any WARRANTY that the application will be
	 *	able to interpret them. The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything with it.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling the hop limit is delayed until after the skb COW */
 
	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
Example No. 30
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct iphdr *iph = ip_hdr(skb);
    struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
    struct rtable *rt;
    int err;
    int frag_max_size;

    frag_max_size = IPCB(skb)->frag_max_size;
    BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;

    if (nf_bridge->pkt_otherhost) {
        skb->pkt_type = PACKET_OTHERHOST;
        nf_bridge->pkt_otherhost = false;
    }
    nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
    if (daddr_was_changed(skb, nf_bridge)) {
        if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
            struct in_device *in_dev = __in_dev_get_rcu(dev);

            /* If err equals -EHOSTUNREACH the error is due to a
             * martian destination or due to the fact that
             * forwarding is disabled. For most martian packets,
             * ip_route_output_key() will fail. It won't fail for 2 types of
             * martian destinations: loopback destinations and destination
             * 0.0.0.0. In both cases the packet will be dropped because the
             * destination is the loopback device and not the bridge. */
            if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                goto free_skb;

            rt = ip_route_output(dev_net(dev), iph->daddr, 0,
                                 RT_TOS(iph->tos), 0);
            if (!IS_ERR(rt)) {
                /* - Bridged-and-DNAT'ed traffic doesn't
                 *   require ip_forwarding. */
                if (rt->dst.dev == dev) {
                    skb_dst_set(skb, &rt->dst);
                    goto bridged_dnat;
                }
                ip_rt_put(rt);
            }
free_skb:
            kfree_skb(skb);
            return 0;
        } else {
            if (skb_dst(skb)->dev == dev) {
bridged_dnat:
                skb->dev = nf_bridge->physindev;
                nf_bridge_update_protocol(skb);
                nf_bridge_push_encap_header(skb);
                NF_HOOK_THRESH(NFPROTO_BRIDGE,
                               NF_BR_PRE_ROUTING,
                               sk, skb, skb->dev, NULL,
                               br_nf_pre_routing_finish_bridge,
                               1);
                return 0;
            }
            ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
            skb->pkt_type = PACKET_HOST;
        }
    } else {
        rt = bridge_parent_rtable(nf_bridge->physindev);
        if (!rt) {
            kfree_skb(skb);
            return 0;
        }
        skb_dst_set_noref(skb, &rt->dst);
    }

    skb->dev = nf_bridge->physindev;
    nf_bridge_update_protocol(skb);
    nf_bridge_push_encap_header(skb);
    NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
                   skb->dev, NULL,
                   br_handle_frame_finish, 1);

    return 0;
}
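
The long comment before br_nf_pre_routing_finish() boils down to one test once routing has resolved an output device: if the route points back out of the logical bridge device the packet came in on, keep bridging; otherwise hand the packet to the IP stack to be routed. A sketch of that classification, with names of our own:

#include <linux/netdevice.h>

enum brnf_action { BRNF_BRIDGE, BRNF_ROUTE };

/* Hedged sketch: in_bridge is the logical bridge device the packet was
 * received on, route_out is the device chosen by ip_route_input() (or
 * by the ip_route_output() fallback described above).
 */
static enum brnf_action classify_after_dnat(const struct net_device *in_bridge,
					    const struct net_device *route_out)
{
	return (route_out == in_bridge) ? BRNF_BRIDGE : BRNF_ROUTE;
}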