static void ipt_ulog_packet(unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct timeval tv;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	
	/* calculate the size of the skb needed */
	if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
		copy_len = skb->len;
	else
		copy_len = loginfo->copy_range;

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];

	spin_lock_bh(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);

	
	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len);
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	
	/* We might not have a timestamp, get one */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	
	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	tv = ktime_to_timeval(skb->tstamp);
	put_unaligned(tv.tv_sec, &pm->timestamp_sec);
	put_unaligned(tv.tv_usec, &pm->timestamp_usec);
	put_unaligned(skb->mark, &pm->mark);
	pm->hook = hooknum;
	if (prefix != NULL)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0 &&
	    skb->mac_header != skb->network_header &&
	    in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	
	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	
	/* check if we are building multi-part messages */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	
	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	
	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	spin_unlock_bh(&ulog_lock);

	return;

nlmsg_failure:
	pr_debug("error during NLMSG_PUT\n");
alloc_failure:
	pr_debug("Error building netlink message\n");
	spin_unlock_bh(&ulog_lock);
}
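
All of the examples on this page revolve around skb_copy_bits(), which gathers bytes out of a possibly non-linear sk_buff (linear area, page fragments, frag list) into a flat buffer. A minimal sketch of the calling contract — the helper name is illustrative, not taken from any of the sources:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Sketch: skb_copy_bits(skb, offset, to, len) copies len bytes starting
 * at byte offset of the packet into the flat buffer to. It returns 0 on
 * success and a negative errno if offset + len overruns skb->len, which
 * is why callers either check the return value or BUG() on it.
 */
static int copy_l4_header(const struct sk_buff *skb, unsigned int thoff,
			  struct tcphdr *th)
{
	return skb_copy_bits(skb, thoff, th, sizeof(*th));
}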
Example #2
/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {

		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
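
The trailer arithmetic in the ESP branch is worth isolating: the trailer is padlen bytes of padding, the two-byte {pad length, next header} pair, then the ICV (authlen bytes, 16 here per the comment above), so the pad-length byte sits at skb->len - (authlen + 2). A hedged sketch of just that step — the helper name is made up:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Sketch: compute the on-wire ESP trailer length by reading the
 * pad-length byte out of the packet, as the driver does above.
 */
static int esp_trailer_len(const struct sk_buff *skb, int authlen)
{
	u8 padlen;

	if (skb_copy_bits(skb, skb->len - (authlen + 2), &padlen, 1) < 0)
		return -EINVAL;
	return authlen + 2 + padlen;
}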
Example #3
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
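
Both the fast path and the slow path above enforce the same geometry rule: the IPv6 fragment header carries the offset in 8-byte units, so every fragment except the last must have a payload length that is a multiple of 8 — hence the (... & 7) checks and the len &= ~7 rounding. The sizing decision in isolation:

/* Sketch: pick the payload length of the next fragment. Non-final
 * fragments are rounded down to a multiple of 8 because frag_off
 * encodes the offset in 8-byte units.
 */
static unsigned int next_frag_len(unsigned int left, unsigned int mtu)
{
	unsigned int len = left > mtu ? mtu : left;

	if (len < left)		/* not the last fragment */
		len &= ~7U;
	return len;
}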
Example #4
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physindev;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physindev = nf_bridge_get_physindev(skb);
			if (physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physoutdev;

			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutdev = nf_bridge_get_physoutdev(skb);
			if (physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk_fullsock(sk)) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
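
One detail worth calling out above is how the payload attribute is emitted: rather than staging the packet bytes in a bounce buffer for nla_put(), the function reserves the attribute in the netlink skb first and lets skb_copy_bits() gather the packet straight into it. The same idiom as a standalone sketch, with the attribute type parameterized for illustration:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

/* Sketch: reserve a data_len-byte attribute in the netlink skb, then
 * copy the packet payload directly into it — no intermediate buffer.
 */
static int put_payload_attr(struct sk_buff *nlskb, int attrtype,
			    const struct sk_buff *pkt, int data_len)
{
	struct nlattr *nla;

	if (skb_tailroom(nlskb) < nla_total_size(data_len))
		return -EMSGSIZE;

	nla = (struct nlattr *)skb_put(nlskb, nla_total_size(data_len));
	nla->nla_type = attrtype;
	nla->nla_len = nla_attr_size(data_len);

	return skb_copy_bits(pkt, 0, nla_data(nla), data_len);
}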
Example #5
static void ipt_ulog_packet(struct net *net,
			    unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct timeval tv;
	struct ulog_net *ulog = ulog_pernet(net);

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
		copy_len = skb->len;
	else
		copy_len = loginfo->copy_range;

	size = nlmsg_total_size(sizeof(*pm) + copy_len);

	ub = &ulog->ulog_buffers[groupnum];

	spin_lock_bh(&ulog->lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(ulog, groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);

	nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len, 0);
	if (!nlh) {
		pr_debug("error during nlmsg_put\n");
		goto out_unlock;
	}
	ub->qlen++;

	pm = nlmsg_data(nlh);
	memset(pm, 0, sizeof(*pm));

	/* We might not have a timestamp, get one */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	tv = ktime_to_timeval(skb->tstamp);
	put_unaligned(tv.tv_sec, &pm->timestamp_sec);
	put_unaligned(tv.tv_usec, &pm->timestamp_usec);
	put_unaligned(skb->mark, &pm->mark);
	pm->hook = hooknum;
	if (prefix != NULL) {
		strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
		pm->prefix[sizeof(pm->prefix) - 1] = '\0';
	}
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));

	if (in && in->hard_header_len > 0 &&
	    skb->mac_header != skb->network_header &&
	    in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));

	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	/* check if we are building multi-part messages */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(ulog, groupnum);
	}
out_unlock:
	spin_unlock_bh(&ulog->lock);

	return;

alloc_failure:
	pr_debug("Error building netlink message\n");
	spin_unlock_bh(&ulog->lock);
}
Example #6
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	unsigned int otcplen, hh_len;
	int tcphoff, needs_ack;
	const struct ipv6hdr *oip6h = oldskb->nh.ipv6h;
	struct ipv6hdr *ip6h;
	struct dst_entry *dst = NULL;
	u8 proto;
	struct flowi fl;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		DEBUGP("ip6t_REJECT: addr is not unicast.\n");
		return;
	}

	proto = oip6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);

	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
		DEBUGP("ip6t_REJECT: Can't get TCP header.\n");
		return;
	}

	otcplen = oldskb->len - tcphoff;

	/* IP header checks: fragment, too short. */
	if ((proto != IPPROTO_TCP) || (otcplen < sizeof(struct tcphdr))) {
		DEBUGP("ip6t_REJECT: proto(%d) != IPPROTO_TCP, or too short. otcplen = %d\n",
			proto, otcplen);
		return;
	}

	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
		BUG();

	/* No RST for RST. */
	if (otcph.rst) {
		DEBUGP("ip6t_REJECT: RST is set\n");
		return;
	}

	/* Check checksum. */
	if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
			    skb_checksum(oldskb, tcphoff, otcplen, 0))) {
		DEBUGP("ip6t_REJECT: TCP checksum is invalid\n");
		return;
	}

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
	ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
	fl.fl_ip_sport = otcph.dest;
	fl.fl_ip_dport = otcph.source;
	security_skb_classify_flow(oldskb, &fl);
	dst = ip6_route_output(NULL, &fl);
	if (dst == NULL)
		return;
	if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		if (net_ratelimit())
			printk("ip6t_REJECT: Can't alloc skb\n");
		dst_release(dst);
		return;
	}

	nskb->dst = dst;

	skb_reserve(nskb, hh_len + dst->header_len);

	ip6h = nskb->nh.ipv6h = (struct ipv6hdr *)
					skb_put(nskb, sizeof(struct ipv6hdr));
	ip6h->version = 6;
	ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
	ip6h->nexthdr = IPPROTO_TCP;
	ip6h->payload_len = htons(sizeof(struct tcphdr));
	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);

	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	tcph->source = otcph.dest;
	tcph->dest = otcph.source;

	if (otcph.ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + otcplen - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;
	tcph->window = 0;
	tcph->urg_ptr = 0;
	tcph->check = 0;

	/* Adjust TCP checksum */
	tcph->check = csum_ipv6_magic(&nskb->nh.ipv6h->saddr,
				      &nskb->nh.ipv6h->daddr,
				      sizeof(struct tcphdr), IPPROTO_TCP,
				      csum_partial(tcph,
						   sizeof(struct tcphdr), 0));

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		dst_output);
}
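
The seq/ack selection above is the RFC 793 reset rule: if the offending segment carried an ACK, the RST reuses that ack_seq as its own sequence number; otherwise the RST acknowledges everything the segment consumed, where SYN and FIN each count as one sequence number on top of the payload. Isolated as a sketch:

#include <linux/tcp.h>

/* Sketch: fill in seq/ack of a RST answering segment otcph, where
 * otcplen is the original TCP length (header plus data).
 */
static void rst_seq_ack(const struct tcphdr *otcph, unsigned int otcplen,
			struct tcphdr *tcph)
{
	if (otcph->ack) {
		tcph->seq = otcph->ack_seq;
		tcph->ack_seq = 0;
	} else {
		tcph->seq = 0;
		tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn +
				      otcph->fin + otcplen -
				      (otcph->doff << 2));
	}
}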
Example #7
/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 tmp;
	u32 serial;
	int loop, ret;

	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		kleave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	serial = ntohl(sp->hdr.serial);

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
			return -EPROTO;
		_proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));

		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
				  ntohl(tmp));
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		if (conn->security)
			return conn->security->respond_to_challenge(
				conn, skb, _abort_code);
		return -EPROTO;

	case RXRPC_PACKET_TYPE_RESPONSE:
		if (!conn->security)
			return -EPROTO;

		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		conn->security->prime_packet_security(conn);
		read_lock_bh(&conn->lock);
		spin_lock(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVER;
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(conn->channels[loop]);
		}

		spin_unlock(&conn->state_lock);
		read_unlock_bh(&conn->lock);
		return 0;

	default:
		_leave(" = -EPROTO [%u]", sp->hdr.type);
		return -EPROTO;
	}
}
Example #8
static void ipt_ulog_packet(unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if ((loginfo->copy_range == 0) ||
	    (loginfo->copy_range > skb->len)) {
		copy_len = skb->len;
	} else {
		copy_len = loginfo->copy_range;
	}

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];
	
	spin_lock_bh(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have 
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, 
		loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 
			sizeof(*pm)+copy_len);
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* We might not have a timestamp, get one */
	if (skb->tstamp.off_sec == 0)
		__net_timestamp((struct sk_buff *)skb);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	pm->timestamp_sec = skb->tstamp.off_sec;
	pm->timestamp_usec = skb->tstamp.off_usec;
	pm->mark = skb->nfmark;
	pm->hook = hooknum;
	if (prefix != NULL)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0
	    && skb->mac.raw != (void *) skb->nh.iph
	    && in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb->mac.raw, in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();
	
	/* check if we are building multi-part messages */
	if (ub->qlen > 1) {
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
	}

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	spin_unlock_bh(&ulog_lock);

	return;

nlmsg_failure:
	PRINTR("ipt_ULOG: error during NLMSG_PUT\n");

alloc_failure:
	PRINTR("ipt_ULOG: Error building netlink message\n");

	spin_unlock_bh(&ulog_lock);
}
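
The ffs() shuffle at the top of this function (and of the variants above) is easy to get wrong: userspace hands the kernel the netlink group as a bitmask (1 << group), the buffer array is indexed by the plain group number, and ffs() is 1-based. A tiny sketch:

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch: turn a netlink group bitmask from userspace into the 0-based
 * index used for ulog_buffers[]. ffs() returns 1..32 for a set bit.
 */
static unsigned int nl_group_to_index(u32 nl_group_mask)
{
	return ffs(nl_group_mask) - 1;	/* e.g. 0x20 -> 5 */
}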
Example #9
static int help(struct sk_buff *skb,
		struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	char *data, *data_limit;
	int dir = CTINFO2DIR(ctinfo);
	unsigned int dataoff, i;
	struct ip_ct_amanda *info =
				(struct ip_ct_amanda *)&ct->help.ct_ftp_info;

	/* Can't track connections formed before we registered */
	if (!info)
		return NF_ACCEPT;

	/* increase the UDP timeout of the master connection as replies from
	 * Amanda clients to the server can be quite delayed */
	ip_ct_refresh(ct, master_timeout * HZ);

	/* If packet is coming from Amanda server */
	if (dir == IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = skb->nh.iph->ihl*4 + sizeof(struct udphdr);
	if (dataoff >= skb->len) {
		if (net_ratelimit())
			printk("ip_conntrack_amanda_help: skblen = %u\n",
			       (unsigned)skb->len);
		return NF_ACCEPT;
	}

	LOCK_BH(&ip_amanda_lock);
	skb_copy_bits(skb, dataoff, amanda_buffer, skb->len - dataoff);
	data = amanda_buffer;
	data_limit = amanda_buffer + skb->len - dataoff;
	*data_limit = '\0';

	/* Search for the CONNECT string */
	data = strstr(data, "CONNECT ");
	if (!data)
		goto out;

	DEBUGP("ip_conntrack_amanda_help: CONNECT found in connection "
		   "%u.%u.%u.%u:%u %u.%u.%u.%u:%u\n",
		   NIPQUAD(iph->saddr), htons(udph->source),
		   NIPQUAD(iph->daddr), htons(udph->dest));
	data += strlen("CONNECT ");

	/* Only search first line. */	
	if (strchr(data, '\n'))
		*strchr(data, '\n') = '\0';

	for (i = 0; i < ARRAY_SIZE(conns); i++) {
		char *match = strstr(data, conns[i]);
		if (match) {
			char *portchr;
			struct ip_conntrack_expect expect;
			struct ip_ct_amanda_expect *exp_amanda_info =
				&expect.help.exp_amanda_info;

			memset(&expect, 0, sizeof(expect));

			data += strlen(conns[i]);
			/* this is not really tcp, but let's steal an
			 * idea from a tcp stream helper :-) */
			// XXX expect.seq = data - amanda_buffer;
			exp_amanda_info->offset = data - amanda_buffer;
// XXX DEBUGP("expect.seq = %p - %p = %d\n", data, amanda_buffer, expect.seq);
DEBUGP("exp_amanda_info->offset = %p - %p = %d\n", data, amanda_buffer, exp_amanda_info->offset);
			portchr = data;
			exp_amanda_info->port = simple_strtoul(data, &data,10);
			exp_amanda_info->len = data - portchr;

			/* eat whitespace */
			while (*data == ' ')
				data++;
			DEBUGP("ip_conntrack_amanda_help: "
			       "CONNECT %s request with port "
			       "%u found\n", conns[i],
			       exp_amanda_info->port);

			expect.tuple = ((struct ip_conntrack_tuple)
				{ { ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip,
				    { 0 } },
				  { ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip,
				    { htons(exp_amanda_info->port) },
				    IPPROTO_TCP }});
Example #10
/* Send RST reply */
static void send_reset(struct net *net, struct sk_buff *oldskb)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	unsigned int otcplen, hh_len;
	int tcphoff, needs_ack;
	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
	struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE	0x0U
	const __u8 tclass = DEFAULT_TOS_VALUE;
	struct dst_entry *dst = NULL;
	u8 proto;
	__be16 frag_off;
	struct flowi6 fl6;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("addr is not unicast.\n");
		return;
	}

	proto = oip6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);

	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
		pr_debug("Cannot get TCP header.\n");
		return;
	}

	otcplen = oldskb->len - tcphoff;

	/* IP header checks: fragment, too short. */
	if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
		pr_debug("proto(%d) != IPPROTO_TCP, "
			 "or too short. otcplen = %d\n",
			 proto, otcplen);
		return;
	}

	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
		BUG();

	/* No RST for RST. */
	if (otcph.rst) {
		pr_debug("RST is set\n");
		return;
	}

	/* Check checksum. */
	if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
			    skb_checksum(oldskb, tcphoff, otcplen, 0))) {
		pr_debug("TCP checksum is invalid\n");
		return;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.saddr = oip6h->daddr;
	fl6.daddr = oip6h->saddr;
	fl6.fl6_sport = otcph.dest;
	fl6.fl6_dport = otcph.source;
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == NULL || dst->error) {
		dst_release(dst);
		return;
	}
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		net_dbg_ratelimited("cannot alloc skb\n");
		dst_release(dst);
		return;
	}

	skb_dst_set(nskb, dst);

	skb_reserve(nskb, hh_len + dst->header_len);

	skb_put(nskb, sizeof(struct ipv6hdr));
	skb_reset_network_header(nskb);
	ip6h = ipv6_hdr(nskb);
	ip6_flow_hdr(ip6h, tclass, 0);
	ip6h->hop_limit = ip6_dst_hoplimit(dst);
	ip6h->nexthdr = IPPROTO_TCP;
	ip6h->saddr = oip6h->daddr;
	ip6h->daddr = oip6h->saddr;

	skb_reset_transport_header(nskb);
	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	tcph->source = otcph.dest;
	tcph->dest = otcph.source;

	if (otcph.ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + otcplen - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;
	tcph->window = 0;
	tcph->urg_ptr = 0;
	tcph->check = 0;

	/* Adjust TCP checksum */
	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
				      &ipv6_hdr(nskb)->daddr,
				      sizeof(struct tcphdr), IPPROTO_TCP,
				      csum_partial(tcph,
						   sizeof(struct tcphdr), 0));

	nf_ct_attach(nskb, oldskb);

	ip6_local_out(nskb);
}
Example #11
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.  (This last
	 * check should be redundant, but it's free.)
	 */
	if (!skb->local_df) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frags(skb)) {
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				truesizes += frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, &rt->rt6i_dst.addr);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, &rt->rt6i_dst.addr);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
Example #12
static void send_unreach(struct sk_buff *skb_in, int code)
{
	struct iphdr *iph;
	struct udphdr *udph;
	struct icmphdr *icmph;
	struct sk_buff *nskb;
	u32 saddr;
	u8 tos;
	int hh_len, length;
	struct rtable *rt = (struct rtable*)skb_in->dst;
	unsigned char *data;

	if (!rt)
		return;

	/* FIXME: Use sysctl number. --RR */
	if (!xrlim_allow(&rt->u.dst, 1*HZ))
		return;

	iph = skb_in->nh.iph;

	/* No replies to physical multicast/broadcast */
	if (skb_in->pkt_type!=PACKET_HOST)
		return;

	/* Now check at the protocol level */
	if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
		return;

	/* Only reply to fragment 0. */
	if (iph->frag_off&htons(IP_OFFSET))
		return;

	/* Ensure we have at least 8 bytes of proto header. */
	if (skb_in->len < skb_in->nh.iph->ihl*4 + 8)
		return;

	/* if UDP checksum is set, verify it's correct */
	if (iph->protocol == IPPROTO_UDP
	    && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) {
		int datalen = skb_in->len - (iph->ihl<<2);
		udph = (struct udphdr *)((char *)iph + (iph->ihl<<2));
		if (udph->check
		    && csum_tcpudp_magic(iph->saddr, iph->daddr,
		                         datalen, IPPROTO_UDP,
		                         csum_partial((char *)udph, datalen,
		                                      0)) != 0)
			return;
	}

	/* If we send an ICMP error to an ICMP error a mess would result.. */
	if (iph->protocol == IPPROTO_ICMP
	    && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) {
		icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));

		if (skb_copy_bits(skb_in, skb_in->nh.iph->ihl*4,
				  icmph, sizeof(*icmph)) < 0)
			return;

		/* Between echo-reply (0) and timestamp (13),
		   everything except echo-request (8) is an error.
		   Also, anything greater than NR_ICMP_TYPES is
		   unknown, and hence should be treated as an error... */
		if ((icmph->type < ICMP_TIMESTAMP
		     && icmph->type != ICMP_ECHOREPLY
		     && icmph->type != ICMP_ECHO)
		    || icmph->type > NR_ICMP_TYPES)
			return;
	}

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL))
		saddr = 0;

	tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = skb_in->nh.iph->saddr,
						.saddr = saddr,
						.tos = RT_TOS(tos) } } };
		if (ip_route_output_key(&rt, &fl))
			return;
	}
	/* RFC says return as much as we can without exceeding 576 bytes. */
	length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr);

	if (length > dst_pmtu(&rt->u.dst))
		length = dst_pmtu(&rt->u.dst);
	if (length > 576)
		length = 576;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	nskb = alloc_skb(hh_len + length, GFP_ATOMIC);
	if (!nskb) {
		ip_rt_put(rt);
		return;
	}

	nskb->priority = 0;
	nskb->dst = &rt->u.dst;
	skb_reserve(nskb, hh_len);

	/* Set up IP header */
	iph = nskb->nh.iph
		= (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	iph->version=4;
	iph->ihl=5;
	iph->tos=tos;
	iph->tot_len = htons(length);

	/* PMTU discovery never applies to ICMP packets. */
	iph->frag_off = 0;

	iph->ttl = MAXTTL;
	ip_select_ident(iph, &rt->u.dst, NULL);
	iph->protocol=IPPROTO_ICMP;
	iph->saddr=rt->rt_src;
	iph->daddr=rt->rt_dst;
	iph->check=0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	/* Set up ICMP header. */
	icmph = nskb->h.icmph
		= (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;	
	icmph->un.gateway = 0;
	icmph->checksum = 0;
	
	/* Copy as much of original packet as will fit */
	data = skb_put(nskb,
		       length - sizeof(struct iphdr) - sizeof(struct icmphdr));

	skb_copy_bits(skb_in, 0, data,
		      length - sizeof(struct iphdr) - sizeof(struct icmphdr));

	icmph->checksum = ip_compute_csum((unsigned char *)icmph,
					  length - sizeof(struct iphdr));

	connection_attach(nskb, skb_in->nfct);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		ip_finish_output);
}	
Example #13
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	struct rtable *rt;
	u_int16_t tmp_port;
	u_int32_t tmp_addr;
	int needs_ack;
	int hh_len;

	/* IP header checks: fragment. */
	if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
		return;

	if (skb_copy_bits(oldskb, oldskb->nh.iph->ihl*4,
			  &otcph, sizeof(otcph)) < 0)
 		return;

	/* No RST for RST. */
	if (otcph.rst)
		return;

	/* FIXME: Check checksum --RR */
	if ((rt = route_reverse(oldskb, hook)) == NULL)
		return;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	/* We need a linear, writeable skb.  We also need to expand
	   headroom in case hh_len of incoming interface < hh_len of
	   outgoing interface */
	nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
			       GFP_ATOMIC);
	if (!nskb) {
		dst_release(&rt->u.dst);
		return;
	}

	dst_release(nskb->dst);
	nskb->dst = &rt->u.dst;

	/* This packet will not be the same as the other: clear nf fields */
	nf_conntrack_put(nskb->nfct);
	nskb->nfct = NULL;
	nskb->nfcache = 0;
#ifdef CONFIG_NETFILTER_DEBUG
	nskb->nf_debug = 0;
#endif
	nskb->nfmark = 0;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(nskb->nf_bridge);
	nskb->nf_bridge = NULL;
#endif

	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);

	/* Swap source and dest */
	tmp_addr = nskb->nh.iph->saddr;
	nskb->nh.iph->saddr = nskb->nh.iph->daddr;
	nskb->nh.iph->daddr = tmp_addr;
	tmp_port = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp_port;

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);

	if (tcph->ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + oldskb->len - oldskb->nh.iph->ihl*4
				      - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;

	tcph->window = 0;
	tcph->urg_ptr = 0;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
				   nskb->nh.iph->saddr,
				   nskb->nh.iph->daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));

	/* Adjust IP TTL, DF */
	nskb->nh.iph->ttl = MAXTTL;
	/* Set DF, id = 0 */
	nskb->nh.iph->frag_off = htons(IP_DF);
	nskb->nh.iph->id = 0;

	/* Adjust IP checksum */
	nskb->nh.iph->check = 0;
	nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
					   nskb->nh.iph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_pmtu(nskb->dst))
		goto free_nskb;

	connection_attach(nskb, oldskb->nfct);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		ip_finish_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}
Example #14
bool
ns_nat64_tcp_manip6_pkt(struct sk_buff *skb,
		unsigned int iphdroff, struct netsession_tuple *tuple)
{
	struct sk_buff * pskb = NULL;
    struct ipv6hdr ip6hdr = {0};
    struct iphdr *ip4hdr = NULL;
	struct frag_hdr ip6_frag_hdr = {0};
	bool ret = NS_ACCEPT;
    s32 status;
    u8 nexthdr;
    s32 exthdr_len = 0;
	struct tcphdr * tcp_hdr = NULL;
	unsigned char  iphdr_buff[NS_NAT64_IPHDROFF] = {0};

	if((pskb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {        
        ret = NS_DROP;
        goto out;
    }
    skb_linearize(pskb);

	if(skb_copy_bits(pskb, iphdroff, &ip6hdr, sizeof(struct ipv6hdr)) < 0) {
        ret = NS_DROP;
        goto out; 
    }
	nexthdr = ip6hdr.nexthdr;
	
	if(ip6hdr.hop_limit < 2) {
        ret = NS_DROP;
        goto out;
    }

	status = nat64_exthdr_process_6to4(pskb, iphdroff, &nexthdr, &ip6_frag_hdr, &exthdr_len);
	if(NS_DROP == status) {
        ret = NS_DROP;
        goto out;
    }

	ip6hdr.nexthdr = nexthdr;

	if (iphdroff) {
		if (skb_copy_bits(pskb, 0, iphdr_buff, 
			(iphdroff<=NS_NAT64_IPHDROFF) ? iphdroff : NS_NAT64_IPHDROFF)) {
			ret = NS_DROP;
			goto out;
		}
	}
	
	/* strip the IPv6 header and extension headers */
    skb_pull(pskb, sizeof(struct ipv6hdr) + exthdr_len + iphdroff);    
    ip6hdr.payload_len -= exthdr_len; 

	tcp_hdr = (struct tcphdr *)pskb->data;
    tcp_hdr->source = tuple->src.u.tcp.port;
    //tcp_hdr->dest   = tuple->dst.u.tcp.port;
    /* recompute the checksum */
    tcp_hdr->check  = 0;            
    tcp_hdr->check  = csum_tcpudp_magic(tuple->src.u3.ip, tuple->dst.u3.ip,
        ip6hdr.payload_len, IPPROTO_TCP, csum_partial((u8 *)tcp_hdr, ip6hdr.payload_len, 0));

	/* prepend the IPv4 header */
    skb_push(pskb, sizeof(struct iphdr) + iphdroff);
	ip4hdr = (struct iphdr*)(pskb->data + iphdroff);
	
	nat64_iphdr_6to4(&ip6hdr, ip4hdr, tuple);
	if(NS_NAT64_FRAG_HDR_EXIST == status) {        
        /* fragment header present: fix up the IPv4 header defaults */
        ip4hdr->id = (u16)(ip6_frag_hdr.identification);
        ip4hdr->frag_off &= ~IP_DF;
        ip4hdr->frag_off |= (ip6_frag_hdr.frag_off) & IP_MF;
        ip4hdr->frag_off |= (ip6_frag_hdr.frag_off) & IP_OFFSET;        
    }
	ip_send_check(ip4hdr);

	if (iphdroff)
		memcpy(skb->data, iphdr_buff, (iphdroff<=NS_NAT64_IPHDROFF) ? iphdroff : NS_NAT64_IPHDROFF);
	
    pskb->nh.iph = (struct iphdr *)pskb->data;
out:
	return ret;
}
Example #15
bool
ns_nat64_tcp_manip4_pkt(struct sk_buff *skb, 
	unsigned int iphdroff, struct netsession_tuple *tuple)
{
	s32 ret = NS_ACCEPT;
    struct iphdr ip4hdr = {0};   
    struct sk_buff * pskb = NULL;
    struct sk_buff * sk_tmp = NULL;
	struct tcphdr * tcp_hdr;
    struct ipv6hdr *ip6hdr = NULL;
	unsigned char  iphdr_buff[NS_NAT64_IPHDROFF] = {0};
	
    s32 realloc_len = (sizeof(struct ipv6hdr) - sizeof(struct iphdr))*2 + sizeof(struct frag_hdr)
        + sizeof(struct icmp6hdr) - sizeof(struct icmphdr);

	if((pskb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {        
        ret = NS_DROP;
        goto out;
    }
    skb_linearize(pskb);

	if(skb_copy_bits(pskb, iphdroff, &ip4hdr, sizeof(struct iphdr)) < 0) {
        ret = NS_DROP;
        goto out; 
    }

	if(ip4hdr.ttl < 2) {         
        ret = NS_DROP;
        goto out;
    }

	if (skb_headroom(pskb) < realloc_len) {
        sk_tmp = pskb;
		pskb = skb_realloc_headroom(sk_tmp, realloc_len);
		kfree_skb(sk_tmp);       
		if (!pskb) {
            ret = NS_DROP;
            goto out;
		}
	}

	if (iphdroff) {
		if (skb_copy_bits(pskb, 0, iphdr_buff, 
			(iphdroff<=NS_NAT64_IPHDROFF) ? iphdroff : NS_NAT64_IPHDROFF)) {
			ret = NS_DROP;
			goto out;
		}
	}
	
	skb_pull(pskb, ((ip4hdr.ihl)<<2) + iphdroff);
    ip4hdr.tot_len -= (ip4hdr.ihl)<<2; 

	tcp_hdr = (struct tcphdr *)(pskb->data + iphdroff);
    /* translate the ports */
    tcp_hdr->source = tuple->src.u.tcp.port;
    tcp_hdr->dest   = tuple->dst.u.tcp.port;
    /* recompute the checksum */
    tcp_hdr->check = 0;
    tcp_hdr->check = csum_ipv6_magic(&tuple->src.u3.in6, &tuple->dst.u3.in6, ip4hdr.tot_len,
        IPPROTO_TCP, csum_partial(tcp_hdr, ip4hdr.tot_len, 0));

	skb_push(pskb, sizeof(struct ipv6hdr));
	ip6hdr = (struct ipv6hdr*)(pskb->data + iphdroff);
	
	nat64_iphdr_4to6(&ip4hdr, ip6hdr, tuple);

	if (iphdroff)
		memcpy(skb->data, iphdr_buff, (iphdroff<=NS_NAT64_IPHDROFF) ? iphdroff : NS_NAT64_IPHDROFF);
	
    pskb->nh.raw = pskb->data;
	
out:
	return ret;
}
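
A design point shared by both manip helpers: after a NAT64 rewrite the pseudo-header changes address family, so the TCP checksum cannot be fixed up incrementally — it is zeroed and rebuilt from scratch over the new pseudo-header, via csum_tcpudp_magic() in the 6-to-4 direction and csum_ipv6_magic() in the 4-to-6 direction. The 6-to-4 case as a sketch:

#include <linux/in.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Sketch: full TCP checksum recompute for the 6-to-4 direction. th must
 * point at the linear TCP header; len is the TCP length (header + data)
 * that feeds the IPv4 pseudo-header.
 */
static void nat64_tcp_csum_v4(struct tcphdr *th, __be32 saddr,
			      __be32 daddr, u16 len)
{
	th->check = 0;
	th->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
				      csum_partial(th, len, 0));
}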
Example #16
static unsigned int switch_hook_forward(
	unsigned int hook,
	struct sk_buff *skb,
	const struct net_device *dev_in,
	const struct net_device *dev_out,
	int (*okfn)(struct sk_buff *)
) {
	//   layer 2   //
	//-------------// skb->data
	//   layer 3   //
	unsigned int result = NF_ACCEPT;
	struct iphdr *ip_header;
	ip_header = ip_hdr(skb);
	if (ip_header->protocol == IPPROTO_TCP) {
		unsigned int ip_header_length;
		unsigned int ip_packet_length;
		ip_header_length = ip_hdrlen(skb);
		ip_packet_length = ntohs(ip_header->tot_len);
		if (ip_packet_length > MTU) {
			int unused;
			int remain;
			int length;
			int offset;
			int pstart;
			__be16 morefrag;
			struct sk_buff *skb_frag;
			struct iphdr *ip_header_frag;
			skb_push(skb, ETH_HLEN);
			//   layer 1   //
			//-------------// skb->data
			//   layer 2   //
			unused = LL_RESERVED_SPACE(skb->dev);
			remain = skb->len - ETH_HLEN - ip_header_length;
			offset = (ntohs(ip_header->frag_off) & IP_OFFSET) << 3;
			pstart = ETH_HLEN + ip_header_length;
			morefrag = ip_header->frag_off & htons(IP_MF);
			while (remain > 0) {
				length = remain > MTU ? MTU : remain;
				if ((skb_frag = alloc_skb(unused + ETH_HLEN + ip_header_length + length, GFP_ATOMIC)) == NULL) {
					break;
				}
				skb_frag->dev = skb->dev;
				skb_reserve(skb_frag, unused);
				skb_put(skb_frag, ETH_HLEN + ip_header_length + length);
				skb_frag->mac_header = skb_frag->data;
				skb_frag->network_header = skb_frag->data + ETH_HLEN;
				skb_frag->transport_header = skb_frag->data + ETH_HLEN + ip_header_length;
				skb_copy_from_linear_data(skb, skb_mac_header(skb_frag), ETH_HLEN + ip_header_length);
				skb_copy_bits(skb, pstart, skb_transport_header(skb_frag), length);
				remain = remain - length;
				//   layer 1   //
				//-------------// skb_frag->data
				//   layer 2   //
				skb_pull(skb_frag, ETH_HLEN);
				//   layer 2   //
				//-------------// skb_frag->data
				//   layer 3   //
				skb_reset_network_header(skb_frag);
				skb_pull(skb_frag, ip_header_length);
				//   layer 3   //
				//-------------// skb_frag->data
				//   layer 4   //
				skb_reset_transport_header(skb_frag);
				skb_push(skb_frag, ip_header_length);
				//   layer 2   //
				//-------------// skb_frag->data
				//   layer 3   //
				ip_header_frag = ip_hdr(skb_frag);
				ip_header_frag->frag_off = htons(offset >> 3);
				if (remain > 0 || morefrag) {
					ip_header_frag->frag_off = ip_header_frag->frag_off | htons(IP_MF);
				}
				ip_header_frag->frag_off = ip_header_frag->frag_off | htons(IP_DF);
				ip_header_frag->tot_len = htons(ip_header_length + length);
				ip_header_frag->protocol = IPPROTO_TCP;
				ip_send_check(ip_header_frag);
				skb_push(skb_frag, ETH_HLEN);
				//   layer 1   //
				//-------------// skb_frag->data
				//   layer 2   //
				dev_queue_xmit(skb_frag);
				pstart = pstart + length;
				offset = offset + length;
			}
			skb_pull(skb, ETH_HLEN);
			//   layer 2   //
			//-------------// skb->data
			//   layer 3   //
			result = NF_DROP;
		}
	}
	return result;
}
Example #17
static int usb_net_raw_ip_tx_urb_submit(struct baseband_usb *usb,
	struct sk_buff *skb)
{
	struct urb *urb;
	unsigned char *buf;
	int err;

	pr_debug("usb_net_raw_ip_tx_urb_submit {\n");

	/* check input */
	if (!usb) {
		pr_err("%s: !usb\n", __func__);
		return -EINVAL;
	}
	if (!usb->usb.interface) {
		pr_err("usb interface disconnected - not submitting tx urb\n");
		return -EINVAL;
	}
	if (!skb) {
		pr_err("%s: !skb\n", __func__);
		usb_autopm_put_interface_async(usb->usb.interface);
		return -EINVAL;
	}

	/* allocate urb */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("usb_alloc_urb() failed\n");
		usb_autopm_put_interface_async(usb->usb.interface);
		return -ENOMEM;
	}
	/* strip the 14-byte Ethernet header (ETH_HLEN); the modem expects raw IP */
	buf = kzalloc(skb->len - 14, GFP_ATOMIC);
	if (!buf) {
		pr_err("usb buffer kzalloc() failed\n");
		usb_free_urb(urb);
		usb_autopm_put_interface_async(usb->usb.interface);
		return -ENOMEM;
	}
	err = skb_copy_bits(skb, 14, buf, skb->len - 14);
	if (err < 0) {
		pr_err("skb_copy_bits() failed - %d\n", err);
		kfree(buf);
		usb_free_urb(urb);
		usb_autopm_put_interface_async(usb->usb.interface);
		return err;
	}
	usb_fill_bulk_urb(urb, usb->usb.device, usb->usb.pipe.bulk.out,
		buf, skb->len - 14,
		usb_net_raw_ip_tx_urb_comp,
		usb);
	urb->transfer_flags = URB_ZERO_PACKET;
	pr_debug("%s: ntohs(skb->protocol) %04x (%s)\n",
		__func__, ntohs(skb->protocol),
		(ntohs(skb->protocol) == 0x0800)
			? "IPv4"
			: (ntohs(skb->protocol) == 0x86dd)
			? "IPv6"
			: "unknown");
	pr_debug("%s: %02x %02x %02x %02x\n", __func__,
		((unsigned char *)urb->transfer_buffer)[0],
		((unsigned char *)urb->transfer_buffer)[1],
		((unsigned char *)urb->transfer_buffer)[2],
		((unsigned char *)urb->transfer_buffer)[3]);

	/* queue tx urb work */
	usb_anchor_urb(urb, &usb->usb.tx_urb_deferred);
	queue_work(usb->usb.tx_workqueue, &usb->usb.tx_work);

	/* free skb */
	consume_skb(skb);

	pr_debug("usb_net_raw_ip_tx_urb_submit }\n");
	return 0;
}
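The kzalloc()/skb_copy_bits() pair above is the usual way to flatten a possibly non-linear skb into a contiguous buffer for hardware that cannot walk fragments; the 14-byte offset being skipped is the Ethernet header (ETH_HLEN), since the modem wants raw IP. A minimal sketch of that pattern under the same assumptions (flatten_l3_payload() is an illustrative name, not a kernel API):

/* Sketch: copy everything past the Ethernet header into a flat buffer.
 * skb_copy_bits() walks linear data, paged fragments and frag_list alike. */
static unsigned char *flatten_l3_payload(const struct sk_buff *skb, int *lenp)
{
	int len = skb->len - ETH_HLEN;
	unsigned char *buf;

	if (len <= 0)
		return NULL;
	buf = kzalloc(len, GFP_ATOMIC);
	if (!buf)
		return NULL;
	if (skb_copy_bits(skb, ETH_HLEN, buf, len) < 0) {
		kfree(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}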
Example #18
static inline int 
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb, 
			unsigned int data_len,
			unsigned int pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix)
{
	unsigned char *old_tail;
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	u_int32_t tmp_uint;

	UDEBUG("entered\n");
		
	old_tail = inst->skb->tail;
	nlh = NLMSG_PUT(inst->skb, 0, 0, 
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= htons(skb->protocol);
	pmsg.hook		= hooknum;

	NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix) {
		int slen = strlen(prefix);
		if (slen > NFULNL_PREFIXLEN)
			slen = NFULNL_PREFIXLEN;
		NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
	}

	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physindev) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physindev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (skb->nfmark) {
		tmp_uint = htonl(skb->nfmark);
		NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
	}

	if (indev && skb->dev && skb->dev->hard_header_parse) {
		struct nfulnl_msg_packet_hw phw;

		phw.hw_addrlen = 
			skb->dev->hard_header_parse((struct sk_buff *)skb, 
						    phw.hw_addr);
		phw.hw_addrlen = htons(phw.hw_addrlen);
		NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
	}

	if (skb->tstamp.off_sec) {
		struct nfulnl_msg_packet_timestamp ts;

		ts.sec = cpu_to_be64(skb->tstamp.off_sec);
		ts.usec = cpu_to_be64(skb->tstamp.off_usec);

		NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
			/* need to unlock here since NFA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
		nfa->nfa_type = NFULA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}
		
	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
	UDEBUG("nlmsg_failure\n");
nfattr_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
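Worth noting for readers unfamiliar with the old netlink helpers: NLMSG_PUT and NFA_PUT are macros that jump to the local nlmsg_failure / nfattr_failure labels when the message or attribute does not fit, which is why the UID branch above must drop sk_callback_lock before calling NFA_PUT. A rough sketch of the shape those macros expand to (illustrative only, not the exact kernel definition):

/* Sketch: bounds-check-then-append with a goto on overflow, which is
 * the control flow NFA_PUT hides behind a function-like macro. */
#define PUT_OR_FAIL(skb, data, len)				\
	do {							\
		if (skb_tailroom(skb) < (int)(len))		\
			goto nfattr_failure;			\
		memcpy(skb_put((skb), (len)), (data), (len));	\
	} while (0)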
Example #19
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
Example #20
static int help(struct sk_buff **pskb,
                struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	struct ts_state ts;
	struct ip_conntrack_expect *exp;
	unsigned int dataoff, start, stop, off, i;
	char pbuf[sizeof("65535")], *tmp;
	u_int16_t port, len;
	int ret = NF_ACCEPT;

	/* Only look at packets from the Amanda server */
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	/* increase the UDP timeout of the master connection as replies from
	 * Amanda clients to the server can be quite delayed */
	ip_ct_refresh(ct, *pskb, master_timeout * HZ);

	/* No data? */
	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
	if (dataoff >= (*pskb)->len) {
		if (net_ratelimit())
			printk("amanda_help: skblen = %u\n", (*pskb)->len);
		return NF_ACCEPT;
	}

	memset(&ts, 0, sizeof(ts));
	start = skb_find_text(*pskb, dataoff, (*pskb)->len,
			      search[SEARCH_CONNECT].ts, &ts);
	if (start == UINT_MAX)
		goto out;
	start += dataoff + search[SEARCH_CONNECT].len;

	memset(&ts, 0, sizeof(ts));
	stop = skb_find_text(*pskb, start, (*pskb)->len,
			     search[SEARCH_NEWLINE].ts, &ts);
	if (stop == UINT_MAX)
		goto out;
	stop += start;

	for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
		memset(&ts, 0, sizeof(ts));
		off = skb_find_text(*pskb, start, stop, search[i].ts, &ts);
		if (off == UINT_MAX)
			continue;
		off += start + search[i].len;

		len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);
		if (skb_copy_bits(*pskb, off, pbuf, len))
			break;
		pbuf[len] = '\0';

		port = simple_strtoul(pbuf, &tmp, 10);
		len = tmp - pbuf;
		if (port == 0 || len > 5)
			break;

		exp = ip_conntrack_expect_alloc(ct);
		if (exp == NULL) {
			ret = NF_DROP;
			goto out;
		}

		exp->expectfn = NULL;
		exp->flags = 0;

		exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
		exp->tuple.src.u.tcp.port = 0;
		exp->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
		exp->tuple.dst.protonum = IPPROTO_TCP;
		exp->tuple.dst.u.tcp.port = htons(port);

		exp->mask.src.ip = htonl(0xFFFFFFFF);
		exp->mask.src.u.tcp.port = 0;
		exp->mask.dst.ip = htonl(0xFFFFFFFF);
		exp->mask.dst.protonum = 0xFF;
		exp->mask.dst.u.tcp.port = htons(0xFFFF);

		if (ip_nat_amanda_hook)
			ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff,
						 len, exp);
		else if (ip_conntrack_expect_related(exp) != 0)
			ret = NF_DROP;
		ip_conntrack_expect_put(exp);
	}

out:
	return ret;
}
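The loop above never parses packet data in place: it copies at most sizeof(pbuf) - 1 bytes out of the skb, NUL-terminates, and only then calls simple_strtoul(), so the parse stays bounded regardless of packet contents. A minimal sketch of that bounded-token pattern (extract_port() is an illustrative name):

/* Sketch: copy a bounded decimal token out of an skb and parse it.
 * Returns the parsed port, or 0 if the copy or parse fails. */
static u_int16_t extract_port(const struct sk_buff *skb,
			      unsigned int off, unsigned int stop)
{
	char pbuf[sizeof("65535")], *end;
	unsigned int len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);

	if (skb_copy_bits(skb, off, pbuf, len))
		return 0;
	pbuf[len] = '\0';
	return simple_strtoul(pbuf, &end, 10);
}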
Example #21
static int ath_rx_assemble_buf(struct ath_softc *sc, wbuf_t *wbuf, 
                        ieee80211_rx_status_t *status, 
                        u_int16_t keyix) 
{

    struct ath_buf *bf = NULL;
    struct sk_buff *kb = NULL; 

    wbuf_t nwbuf;

    if (wbuf_next(*wbuf)) { /* for linux multiple receive buffer */
          bf = ATH_GET_RX_CONTEXT_BUF(wbuf_next(*wbuf));
          bf->bf_mpdu = wbuf_next(*wbuf);
          /* unlink the top half buffer and bottom half buffer */
          wbuf_setnextpkt(*wbuf, NULL); 
          /* use skb_copy_expand for combining two buffer, nwbuf = wbuf##twbuf */
          nwbuf = skb_copy_expand((struct sk_buff *)*wbuf, 
                  0, wbuf_get_pktlen(*wbuf)+wbuf_get_pktlen(bf->bf_mpdu),
                  GFP_ATOMIC);

          if (nwbuf != NULL) {
              skb_copy_bits(bf->bf_mpdu, 0, 
                      wbuf_header(nwbuf) + wbuf_get_pktlen(*wbuf), 
                      wbuf_get_pktlen(bf->bf_mpdu));
              kb = (struct sk_buff *)nwbuf;
              ((struct ieee80211_cb *)kb->cb)->context =
                       &(((struct ieee80211_cb *)kb->cb)[1]);
              skb_put(nwbuf,wbuf_get_pktlen(bf->bf_mpdu));

              wbuf_free(bf->bf_mpdu); 
              bf->bf_mpdu = nwbuf;
              ATH_SET_RX_CONTEXT_BUF(nwbuf, bf);
         } else {

              ATH_GET_RX_CONTEXT_BUF(*wbuf)->bf_status |= ATH_BUFSTATUS_FREE; 

              if (sc->sc_enhanceddmasupport) {
                wbuf_push(*wbuf, sc->sc_rxstatuslen);
                wbuf_push(bf->bf_mpdu, sc->sc_rxstatuslen);
              }

              wbuf_trim(bf->bf_mpdu, wbuf_get_pktlen(bf->bf_mpdu));
              wbuf_trim(*wbuf, wbuf_get_pktlen(*wbuf));

              bf->bf_buf_addr[0] = wbuf_map_single(sc->sc_osdev, 
                                                   bf->bf_mpdu, 
                                                   BUS_DMA_FROMDEVICE, 
                                                   OS_GET_DMA_MEM_CONTEXT(bf, bf_dmacontext));

              /* relink unused wbuf to H/W */
              ath_rx_requeue(sc, *wbuf);
              ath_rx_requeue(sc, bf->bf_mpdu);
              return -1;
         }

           ATH_GET_RX_CONTEXT_BUF(*wbuf)->bf_status |= ATH_BUFSTATUS_FREE; 

           if (sc->sc_enhanceddmasupport) {
              wbuf_push(*wbuf, sc->sc_rxstatuslen);
           }

           wbuf_trim(*wbuf, wbuf_get_pktlen(*wbuf));
           /* relink unused wbuf to H/W */
           ath_rx_requeue(sc, *wbuf);
           *wbuf = bf->bf_mpdu;
    }
    return 0;

}
Example #22
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct ip_conntrack *conntrack,
		      const struct sk_buff *skb,
		      enum ip_conntrack_info ctinfo)
{
	enum tcp_conntrack newconntrack, oldtcpstate;
	struct tcphdr tcph;

	if (skb_copy_bits(skb, skb->nh.iph->ihl * 4, &tcph, sizeof(tcph)) != 0)
		return -1;
	if (skb->len < skb->nh.iph->ihl * 4 + tcph.doff * 4)
		return -1;

	/* If only reply is a RST, we can consider ourselves not to
	   have an established connection: this is a fairly common
	   problem case, so we can delete the conntrack
	   immediately.  --RR */
	if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) && tcph.rst) {
		if (del_timer(&conntrack->timeout))
			conntrack->timeout.function((unsigned long)conntrack);
		return NF_ACCEPT;
	}

	WRITE_LOCK(&tcp_lock);
	oldtcpstate = conntrack->proto.tcp.state;
	newconntrack
		= tcp_conntracks
		[CTINFO2DIR(ctinfo)]
		[get_conntrack_index(&tcph)][oldtcpstate];

	/* Invalid */
	if (newconntrack == TCP_CONNTRACK_MAX) {
		DEBUGP("ip_conntrack_tcp: Invalid dir=%i index=%u conntrack=%u\n",
		       CTINFO2DIR(ctinfo), get_conntrack_index(&tcph),
		       conntrack->proto.tcp.state);
		WRITE_UNLOCK(&tcp_lock);
		return -1;
	}

	conntrack->proto.tcp.state = newconntrack;

	/* Poor man's window tracking: record SYN/ACK for handshake check */
	if (oldtcpstate == TCP_CONNTRACK_SYN_SENT
	    && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
	    && tcph.syn && tcph.ack) {
		conntrack->proto.tcp.handshake_ack
			= htonl(ntohl(tcph.seq) + 1);
		goto out;
	}

	/* Set ASSURED if we see valid ack in ESTABLISHED after SYN_RECV */
	if (oldtcpstate == TCP_CONNTRACK_SYN_RECV
	    && CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL
	    && tcph.ack && !tcph.syn
	    && tcph.ack_seq == conntrack->proto.tcp.handshake_ack)
		set_bit(IPS_ASSURED_BIT, &conntrack->status);

out:	WRITE_UNLOCK(&tcp_lock);
	ip_ct_refresh(conntrack, *tcp_timeouts[newconntrack]);

	return NF_ACCEPT;
}
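tcp_packet() reads the TCP header with skb_copy_bits() into a stack copy instead of dereferencing skb->data directly, because the header may live in non-linear (paged) data; it then cross-checks doff against skb->len before trusting it. A minimal sketch of that guard (read_tcp_header() is an illustrative helper):

/* Sketch: safely fetch the TCP header of a possibly non-linear skb.
 * iphl is the IP header length in 32-bit words; returns 0 on success. */
static int read_tcp_header(const struct sk_buff *skb, unsigned int iphl,
			   struct tcphdr *th)
{
	if (skb_copy_bits(skb, iphl * 4, th, sizeof(*th)) != 0)
		return -1;		/* packet shorter than a TCP header */
	if (skb->len < iphl * 4 + th->doff * 4)
		return -1;		/* doff claims bytes the skb lacks */
	return 0;
}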
Example #23
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #24
static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;

	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
        if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ipv6_esp_hdr*)skb->data;
	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

        {
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
			ret = -EINVAL;
			goto out;
		}
		/* ... check padding bits here. Silly. :-) */ 

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
	kfree(tmp_hdr);
out_nofree:
	return ret;
}
Example #25
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int encap_len = 0;
	u8 nexthdr[2];
	struct scatterlist *sg;
	u8 workbuf[60];
	int padlen;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;
	esph = (struct ip_esp_hdr*)skb->data;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];
		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no))
			goto out;
		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.iph;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);

	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen+2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */ 

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;

		if (encap->encap_type != decap->decap_type)
			goto out;

		uh = (struct udphdr *)(iph + 1);
		encap_len = (void*)esph - (void*)uh;

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);
				
			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}
	
		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (!x->props.mode)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	memcpy(workbuf, skb->nh.raw, iph->ihl*4);
	skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
	skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	memcpy(skb->nh.raw, workbuf, iph->ihl*4);
	skb->nh.iph->tot_len = htons(skb->len);

	return 0;

out:
	return -EINVAL;
}
Example #26
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
    int port = p->port;
    struct net_device *netdev = p->netdev;
    union cvmx_mixx_ircnt mix_ircnt;
    union mgmt_port_ring_entry re;
    struct sk_buff *skb;
    struct sk_buff *skb2;
    struct sk_buff *skb_new;
    union mgmt_port_ring_entry re2;
    int rc = 1;


    re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
    if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
        /* A good packet, send it up. */
        skb_put(skb, re.s.len);
good:
        skb->protocol = eth_type_trans(skb, netdev);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += skb->len;
        netif_receive_skb(skb);
        rc = 0;
    } else if (re.s.code == RING_ENTRY_CODE_MORE) {
        /*
         * Packet split across skbs.  This can happen if we
         * increase the MTU.  Buffers that are already in the
         * rx ring can then end up being too small.  As the rx
         * ring is refilled, buffers sized for the new MTU
         * will be used and we should go back to the normal
         * non-split case.
         */
        skb_put(skb, re.s.len);
        do {
            re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
            if (re2.s.code != RING_ENTRY_CODE_MORE
                    && re2.s.code != RING_ENTRY_CODE_DONE)
                goto split_error;
            skb_put(skb2,  re2.s.len);
            skb_new = skb_copy_expand(skb, 0, skb2->len,
                                      GFP_ATOMIC);
            if (!skb_new)
                goto split_error;
            if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                              skb2->len))
                goto split_error;
            skb_put(skb_new, skb2->len);
            dev_kfree_skb_any(skb);
            dev_kfree_skb_any(skb2);
            skb = skb_new;
        } while (re2.s.code == RING_ENTRY_CODE_MORE);
        goto good;
    } else {
        /* Some other error, discard it. */
        dev_kfree_skb_any(skb);
        /*
         * Error statistics are accumulated in
         * octeon_mgmt_update_rx_stats.
         */
    }
    goto done;
split_error:
    /* Discard the whole mess. */
    dev_kfree_skb_any(skb);
    dev_kfree_skb_any(skb2);
    while (re2.s.code == RING_ENTRY_CODE_MORE) {
        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
        dev_kfree_skb_any(skb2);
    }
    netdev->stats.rx_errors++;

done:
    /* Tell the hardware we processed a packet.  */
    mix_ircnt.u64 = 0;
    mix_ircnt.s.ircnt = 1;
    cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
    return rc;
}
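The RING_ENTRY_CODE_MORE branch above grows the packet one buffer at a time: skb_copy_expand() makes a copy of the accumulated skb with skb2->len bytes of extra tailroom, skb_copy_bits() copies the continuation into that tailroom, and skb_put() accounts for it. A minimal sketch of one such merge step (merge_skbs() is an illustrative name):

/* Sketch: append skb2's data to a fresh copy of skb1, as done above
 * for rx packets split across ring buffers. */
static struct sk_buff *merge_skbs(struct sk_buff *skb1, struct sk_buff *skb2)
{
	struct sk_buff *n = skb_copy_expand(skb1, 0, skb2->len, GFP_ATOMIC);

	if (!n)
		return NULL;
	if (skb_copy_bits(skb2, 0, skb_tail_pointer(n), skb2->len)) {
		kfree_skb(n);
		return NULL;
	}
	skb_put(n, skb2->len);	/* extend tail over the copied bytes */
	return n;
}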
Example #27
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int encap_len = 0;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];
		
		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);

		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr*)skb->data;
	iph = skb->nh.iph;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));

        {
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 workbuf[60];
		int padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto out;
		}
		skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen+2 >= elen)
			goto out;

		/* ... check padding bits here. Silly. :-) */ 

		if (x->encap && decap && decap->decap_type) {
			struct esp_decap_data *encap_data;
			struct udphdr *uh = (struct udphdr *) (iph+1);

			encap_data = (struct esp_decap_data *) (decap->decap_data);
			encap_data->proto = 0;

			switch (decap->decap_type) {
			case UDP_ENCAP_ESPINUDP:
			case UDP_ENCAP_ESPINUDP_NON_IKE:
				encap_data->proto = AF_INET;
				encap_data->saddr.a4 = iph->saddr;
				encap_data->sport = uh->source;
				encap_len = (void*)esph - (void*)uh;
				break;

			default:
				goto out;
			}
		}

		iph->protocol = nexthdr[1];
		pskb_trim(skb, skb->len - alen - padlen - 2);
		memcpy(workbuf, skb->nh.raw, iph->ihl*4);
		skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, workbuf, iph->ihl*4);
		skb->nh.iph->tot_len = htons(skb->len);
	}

	return 0;

out:
	return -EINVAL;
}
Example #28
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int ihl;
	u8 nexthdr[2];
	struct scatterlist *sg;
	int padlen;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr*)skb->data;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	if (unlikely(err))
		return err;

	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen+2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT ||
		    x->props.mode == XFRM_MODE_BEET)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
	skb_set_transport_header(skb, -ihl);

	return 0;

out:
	return -EINVAL;
}
Example #29
static int help(struct sk_buff *skb,
                struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	struct ip_conntrack_expect *exp;
	struct ip_ct_amanda_expect *exp_amanda_info;
	char *data, *data_limit, *tmp;
	unsigned int dataoff, i;
	u_int16_t port, len;

	/* Only look at packets from the Amanda server */
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	/* increase the UDP timeout of the master connection as replies from
	 * Amanda clients to the server can be quite delayed */
	ip_ct_refresh(ct, master_timeout * HZ);

	/* No data? */
	dataoff = skb->nh.iph->ihl*4 + sizeof(struct udphdr);
	if (dataoff >= skb->len) {
		if (net_ratelimit())
			printk("amanda_help: skblen = %u\n", skb->len);
		return NF_ACCEPT;
	}

	LOCK_BH(&amanda_buffer_lock);
	skb_copy_bits(skb, dataoff, amanda_buffer, skb->len - dataoff);
	data = amanda_buffer;
	data_limit = amanda_buffer + skb->len - dataoff;
	*data_limit = '\0';

	/* Search for the CONNECT string */
	data = strstr(data, "CONNECT ");
	if (!data)
		goto out;
	data += strlen("CONNECT ");

	/* Only search first line. */	
	if ((tmp = strchr(data, '\n')))
		*tmp = '\0';

	for (i = 0; i < ARRAY_SIZE(conns); i++) {
		char *match = strstr(data, conns[i]);
		if (!match)
			continue;
		tmp = data = match + strlen(conns[i]);
		port = simple_strtoul(data, &data, 10);
		len = data - tmp;
		if (port == 0 || len > 5)
			break;

		exp = ip_conntrack_expect_alloc();
		if (exp == NULL)
			goto out;

		exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
		exp->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
		exp->tuple.dst.protonum = IPPROTO_TCP;
		exp->mask.src.ip = 0xFFFFFFFF;
		exp->mask.dst.ip = 0xFFFFFFFF;
		exp->mask.dst.protonum = 0xFFFF;
		exp->mask.dst.u.tcp.port = 0xFFFF;

		exp_amanda_info = &exp->help.exp_amanda_info;
		exp_amanda_info->offset = data - amanda_buffer;
		exp_amanda_info->port   = port;
		exp_amanda_info->len    = len;

		exp->tuple.dst.u.tcp.port = htons(port);

		ip_conntrack_expect_related(exp, ct);
	}

out:
	UNLOCK_BH(&amanda_buffer_lock);
	return NF_ACCEPT;
}
Example #30
static int help(struct sk_buff *skb,
                struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
    unsigned int dataoff;
    struct tcphdr tcph;
    char *data, *data_limit;
    int dir = CTINFO2DIR(ctinfo);
    struct ip_conntrack_expect *exp;
    struct ip_ct_irc_expect *exp_irc_info = NULL;

    u_int32_t dcc_ip;
    u_int16_t dcc_port;
    int i;
    char *addr_beg_p, *addr_end_p;

    DEBUGP("entered\n");

    /* If packet is coming from IRC server */
    if (dir == IP_CT_DIR_REPLY)
        return NF_ACCEPT;

    /* Until there's been traffic both ways, don't look in packets. */
    if (ctinfo != IP_CT_ESTABLISHED
            && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
        DEBUGP("Conntrackinfo = %u\n", ctinfo);
        return NF_ACCEPT;
    }

    /* Not a full tcp header? */
    if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) != 0)
        return NF_ACCEPT;

    /* No data? */
    dataoff = skb->nh.iph->ihl*4 + tcph.doff*4;
    if (dataoff >= skb->len)
        return NF_ACCEPT;

    LOCK_BH(&ip_irc_lock);
    skb_copy_bits(skb, dataoff, irc_buffer, skb->len - dataoff);

    data = irc_buffer;
    data_limit = irc_buffer + skb->len - dataoff;

    /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
     * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
    while (data < (data_limit - (19 + MINMATCHLEN))) {
        if (memcmp(data, "\1DCC ", 5)) {
            data++;
            continue;
        }

        data += 5;
        /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

        DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n",
               NIPQUAD(iph->saddr), ntohs(tcph.source),
               NIPQUAD(iph->daddr), ntohs(tcph.dest));

        for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
            if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
                /* no match */
                continue;
            }

            DEBUGP("DCC %s detected\n", dccprotos[i]);
            data += strlen(dccprotos[i]);
            /* we have at least
             * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
             * data left (== 14/13 bytes) */
            if (parse_dcc((char *)data, data_limit, &dcc_ip,
                          &dcc_port, &addr_beg_p, &addr_end_p)) {
                /* unable to parse */
                DEBUGP("unable to parse dcc command\n");
                continue;
            }
            DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n",
                   HIPQUAD(dcc_ip), dcc_port);

            /* dcc_ip can be the internal OR external (NAT'ed) IP
             * Tiago Sousa <*****@*****.**> */
            if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip)
                    && ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip != htonl(dcc_ip)) {
                if (net_ratelimit())
                    printk(KERN_WARNING
                           "Forged DCC command from "
                           "%u.%u.%u.%u: %u.%u.%u.%u:%u\n",
                           NIPQUAD(ct->tuplehash[dir].tuple.src.ip),
                           HIPQUAD(dcc_ip), dcc_port);

                continue;
            }

            exp = ip_conntrack_expect_alloc();
            if (exp == NULL)
                goto out;

            exp_irc_info = &exp->help.exp_irc_info;

            /* save position of address in dcc string,
             * necessary for NAT */
            DEBUGP("tcph->seq = %u\n", tcph.seq);
            exp->seq = ntohl(tcph.seq) + (addr_beg_p - irc_buffer);
            exp_irc_info->len = (addr_end_p - addr_beg_p);
            exp_irc_info->port = dcc_port;
            DEBUGP("wrote info seq=%u (ofs=%u), len=%d\n",
                   exp->seq, (addr_end_p - _data), exp_irc_info->len);

            exp->tuple = ((struct ip_conntrack_tuple)
            { {
                    0, { 0 }
                },
                {   ct->tuplehash[dir].tuple.src.ip, { .tcp = { htons(dcc_port) } },
                    IPPROTO_TCP
                }
            });