Example #1
static int xfrm6_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;
	
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto error_nolock;
	}

	if (x->props.mode) {
		err = xfrm6_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;

		err = x->mode->output(skb);
		if (err)
			goto error;

		err = x->type->output(x, skb);
		if (err)
			goto error;

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb->nh.raw = skb->data;
		
		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && !x->props.mode);

	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
	if (err == -EINPROGRESS)
		goto out_exit;
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
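Every example below ends the same way: dst_pop() steps from the current dst_entry to the next one in the xfrm bundle, and a NULL result is turned into -EHOSTUNREACH. For orientation, the helper in kernels of this vintage was a small inline along these lines (a from-memory sketch, not necessarily the exact version these snippets were built against):

static inline struct dst_entry *dst_pop(struct dst_entry *dst)
{
	/* Take a reference on the child before dropping our own
	 * reference on the parent, so the child cannot disappear
	 * underneath us. dst_clone(NULL) simply returns NULL, which
	 * the callers above map to -EHOSTUNREACH. */
	struct dst_entry *child = dst_clone(dst->child);

	dst_release(dst);
	return child;
}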
Example #2
static int xfrm6_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err)
			goto error_nolock;
	}

	if (x->props.mode == XFRM_MODE_TUNNEL) {
		err = xfrm6_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;

		err = x->mode->output(x, skb);
		if (err)
			goto error;

		err = x->type->output(x, skb);
		if (err)
			goto error;

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		if (x->props.mode == XFRM_MODE_ROUTEOPTIMIZATION)
			x->lastused = (u64)xtime.tv_sec;

		spin_unlock_bh(&x->lock);

		skb->nh.raw = skb->data;

		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && (x->props.mode != XFRM_MODE_TUNNEL));

	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
Example #3
static int ipip_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct iphdr *iph, *top_iph;
	int tos, err;

	if ((err = xfrm4_tunnel_check_size(skb)) != 0)
		goto error_nolock;
		
	iph = skb->nh.iph;

	spin_lock_bh(&x->lock);

	tos = iph->tos;

	top_iph = (struct iphdr *) skb_push(skb, x->props.header_len);
	top_iph->ihl = 5;
	top_iph->version = 4;
	top_iph->tos = INET_ECN_encapsulate(tos, iph->tos);
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = iph->frag_off & ~htons(IP_MF|IP_OFFSET);
	if (!(iph->frag_off & htons(IP_DF))) {
#ifdef NETIF_F_TSO
		__ip_select_ident(top_iph, dst, 0);
#else
		__ip_select_ident(top_iph, dst);
#endif
	}
	top_iph->ttl = iph->ttl;
	top_iph->protocol = IPPROTO_IPIP;
	top_iph->check = 0;
	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
	ip_send_check(top_iph);

	skb->nh.raw = skb->data;
	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock_bh(&x->lock);

	if ((skb->dst = dst_pop(dst)) == NULL) {
		kfree_skb(skb);
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error_nolock:
	kfree_skb(skb);
	return err;
}
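The outer TOS above comes from INET_ECN_encapsulate(), whose job is to keep the outer DSCP bits while exporting the inner ECT capability and never copying an inner CE mark into the outer header. A standalone illustration of that rule (hypothetical helper name; the real macro lives in include/net/inet_ecn.h and its exact form varies by kernel version):

#define ECN_MASK	0x03	/* low two TOS bits carry the ECN codepoint */
#define ECN_CE		0x03	/* Congestion Experienced */
#define ECN_ECT0	0x02	/* ECT(0) */

/* Derive the outer-header TOS from the inner-header TOS when
 * encapsulating: preserve the outer DSCP, copy the inner ECN
 * codepoint, but downgrade CE to ECT(0) so congestion marks are
 * not leaked outward. */
static unsigned char ecn_encapsulate(unsigned char outer, unsigned char inner)
{
	unsigned char inner_ecn = inner & ECN_MASK;

	outer &= ~ECN_MASK;
	outer |= (inner_ecn == ECN_CE) ? ECN_ECT0 : inner_ecn;
	return outer;
}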
Example #4
int xfrm6_output(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;
	
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(pskb, 0);
		skb = *pskb;
		if (err)
			goto error_nolock;
	}

	if (x->props.mode) {
		err = xfrm6_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_state_check(x, skb);
	if (err)
		goto error;

	xfrm6_encap(skb);

	err = x->type->output(skb);
	if (err)
		goto error;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock_bh(&x->lock);

	skb->nh.raw = skb->data;
	
	if (!(skb->dst = dst_pop(dst))) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	err = NET_XMIT_BYPASS;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
Example #5
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_state_check_space(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);
		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
			XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
			if (unlikely(x->replay.oseq == 0)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
				x->replay.oseq--;
				xfrm_audit_state_replay_overflow(x, skb);
				err = -EOVERFLOW;
				goto error;
			}
			if (xfrm_aevent_is_on(net))
				xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		err = x->type->output(x, skb);
		if (err == -EINPROGRESS)
			goto out_exit;

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = dst_pop(dst);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
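Unlike the older variants, this xfrm_output_one() takes an err argument and carries a resume label: when x->type->output() hands the packet to asynchronous crypto it returns -EINPROGRESS and the function bails out, and the completion callback later re-enters the loop with the final status. In mainline kernels of roughly this era the caller looks like the following (a simplified from-memory sketch of xfrm_output_resume(); details differ between versions):

int xfrm_output_resume(struct sk_buff *skb, int err)
{
	/* err > 0 on the first pass; otherwise it is the completion
	 * status passed back by the async crypto callback. */
	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset(skb);

		/* Re-run LOCAL_OUT for the freshly built outer header. */
		err = skb_dst(skb)->ops->local_out(skb);
		if (unlikely(err != 1))
			goto out;

		/* Bundle exhausted: hand off to the normal output path. */
		if (!skb_dst(skb)->xfrm)
			return dst_output(skb);

		/* xfrm_output2 is the netfilter okfn that re-enters
		 * this transform loop after POST_ROUTING. */
		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	return err == -EINPROGRESS ? 0 : err;

out:
	return err;
}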
Example #6
static int ipcomp6_output(struct sk_buff **pskb)
{
	int err;
	struct dst_entry *dst = (*pskb)->dst;
	struct xfrm_state *x = dst->xfrm;
	struct ipv6hdr *tmp_iph = NULL, *iph, *top_iph;
	int hdr_len = 0;
	struct ipv6_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;
	u8 *prevhdr;
	u8 nexthdr = 0;
	int plen, dlen;
	u8 *start, *scratch = ipcd->scratch;

	if ((*pskb)->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(pskb, 0);
		if (err)
			goto error_nolock;
	}

	spin_lock_bh(&x->lock);

	err = xfrm_check_output(x, *pskb, AF_INET6);
	if (err)
		goto error;

	if (x->props.mode) {
		hdr_len = sizeof(struct ipv6hdr);
		nexthdr = IPPROTO_IPV6;
		iph = (*pskb)->nh.ipv6h;
		top_iph = (struct ipv6hdr *)skb_push(*pskb, sizeof(struct ipv6hdr));
		top_iph->version = 6;
		top_iph->priority = iph->priority;
		top_iph->flow_lbl[0] = iph->flow_lbl[0];
		top_iph->flow_lbl[1] = iph->flow_lbl[1];
		top_iph->flow_lbl[2] = iph->flow_lbl[2];
		top_iph->nexthdr = IPPROTO_IPV6; /* initial */
		top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
		top_iph->hop_limit = iph->hop_limit;
		memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
		memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
		(*pskb)->nh.raw = (*pskb)->data; /* == top_iph */
		(*pskb)->h.raw = (*pskb)->nh.raw + hdr_len;
	} else {
		hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
		nexthdr = *prevhdr;
	}

	/* check whether datagram len is larger than threshold */
	if (((*pskb)->len - hdr_len) < ipcd->threshold) {
		goto out_ok;
	}

	if ((skb_is_nonlinear(*pskb) || skb_cloned(*pskb)) &&
		skb_linearize(*pskb, GFP_ATOMIC) != 0) {
		err = -ENOMEM;
		goto error;
	}

	/* compression */
	plen = (*pskb)->len - hdr_len;
	dlen = IPCOMP_SCRATCH_SIZE;
	start = (*pskb)->data + hdr_len;

	err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
	if (err) {
		goto error;
	}
	if ((dlen + sizeof(struct ipv6_comp_hdr)) >= plen) {
		goto out_ok;
	}
	memcpy(start, scratch, dlen);
	pskb_trim(*pskb, hdr_len+dlen);

	/* insert ipcomp header and replace datagram */
	tmp_iph = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_iph) {
		err = -ENOMEM;
		goto error;
	}
	memcpy(tmp_iph, (*pskb)->nh.raw, hdr_len);
	top_iph = (struct ipv6hdr*)skb_push(*pskb, sizeof(struct ipv6_comp_hdr));
	memcpy(top_iph, tmp_iph, hdr_len);
	kfree(tmp_iph);

	if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
		IP6_ECN_clear(top_iph);
	top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
	(*pskb)->nh.raw = (*pskb)->data; /* top_iph */
	ip6_find_1stfragopt(*pskb, &prevhdr); 
	*prevhdr = IPPROTO_COMP;

	ipch = (struct ipv6_comp_hdr *)((unsigned char *)top_iph + hdr_len);
	ipch->nexthdr = nexthdr;
	ipch->flags = 0;
	ipch->cpi = htons((u16 )ntohl(x->id.spi));

	(*pskb)->h.raw = (unsigned char*)ipch;
out_ok:
	x->curlft.bytes += (*pskb)->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);

	if (((*pskb)->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	err = NET_XMIT_BYPASS;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(*pskb);
	goto out_exit;
}
Example #7
static int xfrm4_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	/* purpose: 0014838 author: paul.chen date: 2011-12-06          */
	/* description: Fix reboot & plug in crash for VPN G2G wildcard */
	if(skb && skb->nh.iph && skb->nh.iph->protocol==IPPROTO_IGMP)
	{
		err = -EINVAL;
		goto error_nolock;
	}

	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto error_nolock;
	}

	if (x->props.mode) {
		err = xfrm4_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;

#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY) 
		/*
		 * If Octeon IPSEC Acceleration module has been loaded
		 * call it, otherwise, follow the software path
		 */
		if(cavium_ipsec_process) 
		{
			if (skb_is_nonlinear(skb) &&
				skb_linearize(skb, GFP_ATOMIC) != 0) {
				err = -ENOMEM;
				goto error;
			}
			err = cavium_ipsec_process(x->sa_handle, skb, 0, 1 /*ENCRYPT*/);
		}
		else
		{
			xfrm4_encap(skb);
			err = x->type->output(x, skb);
		}
#else
		xfrm4_encap(skb);
		err = x->type->output(x, skb);
#endif
		if (err)
			goto error;

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);
	
		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && !x->props.mode);

	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
Example #8
int esp_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x  = dst->xfrm;
	struct iphdr *iph, *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	struct udphdr *uh = NULL;
	struct xfrm_encap_tmpl *encap = NULL;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	union {
		struct iphdr	iph;
		char 		buf[60];
	} tmp_iph;

	/* First, if the skb is not checksummed, complete checksum. */
	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET);
	if (err)
		goto error;
	err = -ENOMEM;

	/* Strip IP header in transport mode. Save it. */
	if (!x->props.mode) {
		iph = skb->nh.iph;
		memcpy(&tmp_iph, iph, iph->ihl*4);
		__skb_pull(skb, iph->ihl*4);
	}
	/* Now skb is pure payload to encrypt */

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
		goto error;

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	encap = x->encap;

	iph = skb->nh.iph;
	if (x->props.mode) {
		top_iph = (struct iphdr*)skb_push(skb, x->props.header_len);
		esph = (struct ip_esp_hdr*)(top_iph+1);
		if (encap && encap->encap_type) {
			switch (encap->encap_type) {
			case UDP_ENCAP_ESPINUDP:
				uh = (struct udphdr*) esph;
				esph = (struct ip_esp_hdr*)(uh+1);
				top_iph->protocol = IPPROTO_UDP;
				break;
			default:
				printk(KERN_INFO
				       "esp_output(): Unhandled encap: %u\n",
				       encap->encap_type);
				top_iph->protocol = IPPROTO_ESP;
				break;
			}
		} else
			top_iph->protocol = IPPROTO_ESP;
		*(u8*)(trailer->tail - 1) = IPPROTO_IPIP;
		top_iph->ihl = 5;
		top_iph->version = 4;
		top_iph->tos = iph->tos;	/* DS disclosed */
		if (x->props.flags & XFRM_STATE_NOECN)
			IP_ECN_clear(top_iph);
		top_iph->tot_len = htons(skb->len + alen);
		top_iph->frag_off = iph->frag_off&htons(IP_DF);
		if (!(top_iph->frag_off))
			ip_select_ident(top_iph, dst, 0);
		top_iph->ttl = iph->ttl;	/* TTL disclosed */
		top_iph->check = 0;
		top_iph->saddr = x->props.saddr.a4;
		top_iph->daddr = x->id.daddr.a4;
		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
	} else {
		esph = (struct ip_esp_hdr*)skb_push(skb, x->props.header_len);
		top_iph = (struct iphdr*)skb_push(skb, iph->ihl*4);
		memcpy(top_iph, &tmp_iph, iph->ihl*4);
		if (encap && encap->encap_type) {
			switch (encap->encap_type) {
			case UDP_ENCAP_ESPINUDP:
				uh = (struct udphdr*) esph;
				esph = (struct ip_esp_hdr*)(uh+1);
				top_iph->protocol = IPPROTO_UDP;
				break;
			default:
				printk(KERN_INFO
				       "esp_output(): Unhandled encap: %u\n",
				       encap->encap_type);
				top_iph->protocol = IPPROTO_ESP;
				break;
			}
		} else
			top_iph->protocol = IPPROTO_ESP;
		iph = &tmp_iph.iph;
		top_iph->tot_len = htons(skb->len + alen);
		top_iph->check = 0;
		top_iph->frag_off = iph->frag_off;
		*(u8*)(trailer->tail - 1) = iph->protocol;
	}

	/* this is non-NULL only with UDP Encapsulation */
	if (encap && uh) {
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - sizeof(struct iphdr));
		uh->check = 0;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != sgbuf))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
		              sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	ip_send_check(top_iph);

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}
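The trailer assembled above follows the ESP layout from RFC 2406: self-describing pad bytes (1, 2, 3, ...), a pad-length octet, a next-header octet, and a truncated ICV appended after authentication. A comment-only sketch of the resulting packet, matching what this function builds (illustrative, not lifted from the kernel sources):

/*
 * ESP packet as built by esp_output() (transport mode shown):
 *
 *   [ IP hdr | ESP hdr (SPI, seq) | IV | payload
 *             | 1 2 ... padlen | pad length | next header | ICV ]
 *
 * - the pad bytes round payload + 2 (pad length + next header) up to
 *   the cipher block size, plus any configured extra padlen;
 * - "next header" is IPPROTO_IPIP in tunnel mode, or the protocol
 *   saved from the original IP header in transport mode;
 * - everything after the IV up to and including "next header" is
 *   encrypted, and the ICV covers the ESP header, IV and ciphertext.
 */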
Example #9
static int ipcomp_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	struct iphdr *iph, *top_iph;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;
	union {
		struct iphdr	iph;
		char 		buf[60];
	} tmp_iph;
	int hdr_len = 0;

	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET);
	if (err)
		goto error;

	/* Don't bother compressing */
	if (!x->props.mode) {
		iph = skb->nh.iph;
		hdr_len = iph->ihl * 4;
	}
	if ((skb->len - hdr_len) < ipcd->threshold) {
		if (x->props.mode) {
			ipcomp_tunnel_encap(x, skb);
			iph = skb->nh.iph;
			iph->protocol = IPPROTO_IPIP;
			ip_send_check(iph);
		}
		goto out_ok;
	}

	if (x->props.mode) 
		ipcomp_tunnel_encap(x, skb);

	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
	    skb_linearize(skb, GFP_ATOMIC) != 0) {
	    	err = -ENOMEM;
	    	goto error;
	}
	
	err = ipcomp_compress(x, skb);
	if (err) {
		if (err == -EMSGSIZE) {
			if (x->props.mode) {
				iph = skb->nh.iph;
				iph->protocol = IPPROTO_IPIP;
				ip_send_check(iph);
			}
			goto out_ok;
		}
		goto error;
	}

	/* Install ipcomp header, convert into ipcomp datagram. */
	iph = skb->nh.iph;
	memcpy(&tmp_iph, iph, iph->ihl * 4);
	top_iph = (struct iphdr *)skb_push(skb, sizeof(struct ip_comp_hdr));
	memcpy(top_iph, &tmp_iph, iph->ihl * 4);
	iph = top_iph;
	if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
		IP_ECN_clear(iph);
	iph->tot_len = htons(skb->len);
	iph->protocol = IPPROTO_COMP;
	iph->check = 0;
	ipch = (struct ip_comp_hdr *)((char *)iph + iph->ihl * 4);
	ipch->nexthdr = x->props.mode ? IPPROTO_IPIP : tmp_iph.iph.protocol;
	ipch->flags = 0;
	ipch->cpi = htons((u16 )ntohl(x->id.spi));
	ip_send_check(iph);
	skb->nh.raw = skb->data;

out_ok:
	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	err = NET_XMIT_BYPASS;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
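The IPComp header written here is only four bytes, and the CPI is just the low 16 bits of the state's SPI, which is what the htons((u16)ntohl(x->id.spi)) expression extracts. For reference, the header layout is as follows (struct ip_comp_hdr from the kernel headers, reproduced from memory; field types may vary slightly between versions):

struct ip_comp_hdr {
	__u8	nexthdr;	/* protocol of the compressed payload */
	__u8	flags;		/* reserved, must be zero */
	__be16	cpi;		/* Compression Parameter Index */
};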
Example #10
/* Route IPv4 packet 'skbp', using the route key selectors in 
   'route_selector' and the interface number 'ifnum_in'. */
Boolean
ssh_interceptor_reroute_skb_ipv4(SshInterceptor interceptor,
				 struct sk_buff *skbp,
				 SshUInt16 route_selector,
				 SshUInt32 ifnum_in)
{
  struct iphdr *iph;
  int rval = 0;

  /* Recalculate the route info as the engine might have touched the
     destination address. This can happen for example if we are in
     tunnel mode. */

  iph = (struct iphdr *) SSH_SKB_GET_NETHDR(skbp);
  if (iph == NULL)
    {
      SSH_DEBUG(SSH_D_ERROR, ("Could not access IP header"));
      return FALSE;
    }

  /* Release old dst_entry */
  if (skb_dst(skbp))
    dst_release(skb_dst(skbp));

  skb_dst_set(skbp, NULL);

  if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
      && (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_LOCAL_SRC) == 0
      && (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IN_IFNUM))
    {
      u32 saddr = 0;
      u8 ipproto = 0;
      u8 tos = 0;
#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
      u32 fwmark = 0;
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */
      struct net_device *dev;

      SSH_ASSERT(skbp->protocol == __constant_htons(ETH_P_IP));

      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
	saddr = iph->saddr;
           
      /* Map 'ifnum_in' to a net_device. */
      SSH_LINUX_ASSERT_VALID_IFNUM(ifnum_in);
      dev = ssh_interceptor_ifnum_to_netdev(interceptor, ifnum_in);

      /* Clear the IP protocol, if selector does not define it.
	 Ugly, but necessary to make sure the skb gets
	 rerouted like engine expects. */
      if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO) == 0)
	{	  
	  ipproto = iph->protocol;
	  iph->protocol = 0;
	}

      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS)
	tos = RT_TOS(iph->tos);

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
      /* Clear the nfmark, if selector does not define it.
	 Ugly, but necessary to make sure the skb gets
	 rerouted like engine expects. */
      if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION) == 0)
	{
	  fwmark = SSH_SKB_MARK(skbp);
	  SSH_SKB_MARK(skbp) = 0;
	}
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */
      
      /* Call ip_route_input */
      if (ip_route_input(skbp, iph->daddr, saddr, tos, dev) < 0)
	{
	  SSH_DEBUG(SSH_D_FAIL, 
		    ("ip_route_input failed. (0x%08x -> 0x%08x)",
		     iph->saddr, iph->daddr));

	  SSH_DEBUG(SSH_D_NICETOKNOW,
		    ("dst 0x%08x src 0x%08x iif %d[%s] proto %d tos 0x%02x "
		     "fwmark 0x%x",
		     iph->daddr, saddr,
		     (dev ? dev->ifindex : -1),
		     (dev ? dev->name : "none"), 
		     iph->protocol, tos, 
		     SSH_SKB_MARK(skbp)));

	  /* Release netdev reference */
	  if (dev)
	    ssh_interceptor_release_netdev(dev);

	  /* Note, skb modifications are not un-done as the caller frees the 
	     skb. If this is changed then the modifications should be un-done
	     here before returning. */

	  return FALSE;
	}

      /* Write original IP protocol back to skb */
      if (ipproto)
	iph->protocol = ipproto;

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
      /* Write original fwmark back to skb */
      if (fwmark)
	SSH_SKB_MARK(skbp) = fwmark;
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */

      /* Release netdev reference */
      if (dev)
	ssh_interceptor_release_netdev(dev);
    }

  else
    {
      struct rtable *rt;
      struct flowi rt_key;

      if ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_LOCAL_SRC) == 0)
	route_selector &= ~SSH_INTERCEPTOR_ROUTE_KEY_SRC;

      memset(&rt_key, 0, sizeof(rt_key));
      
      rt_key.fl4_dst = iph->daddr;
      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC)
	rt_key.fl4_src = iph->saddr;
      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM)
	rt_key.oif = (skbp->dev ? skbp->dev->ifindex : 0);
      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO)
	rt_key.proto = iph->protocol;
      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS)
	rt_key.fl4_tos = RT_TOS(iph->tos);
      rt_key.fl4_scope = RT_SCOPE_UNIVERSE;

#if (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0)
#ifdef SSH_LINUX_FWMARK_EXTENSION_SELECTOR
      if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION)
	{
#ifdef LINUX_HAS_SKB_MARK
	  rt_key.mark = SSH_SKB_MARK(skbp);
#else /* LINUX_HAS_SKB_MARK */
#ifdef CONFIG_IP_ROUTE_FWMARK
	  rt_key.fl4_fwmark = SSH_SKB_MARK(skbp);
#endif /* CONFIG_IP_ROUTE_FWMARK */
#endif /* LINUX_HAS_SKB_MARK */
	}
#endif /* SSH_LINUX_FWMARK_EXTENSION_SELECTOR */
#endif /* (SSH_INTERCEPTOR_NUM_EXTENSION_SELECTORS > 0) */
      
      /* Call ip_route_output */
#ifdef LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT
      rval = ip_route_output_key(&init_net, &rt, &rt_key);
#else /* LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT */
      rval = ip_route_output_key(&rt, &rt_key);
#endif /* LINUX_IP_ROUTE_OUTPUT_KEY_HAS_NET_ARGUMENT */
      if (rval < 0)
	{
	  SSH_DEBUG(SSH_D_FAIL, 
		    ("ip_route_output_key failed (0x%08x -> 0x%08x): %d",
		     iph->saddr, iph->daddr, rval));

	  SSH_DEBUG(SSH_D_NICETOKNOW,
		    ("dst 0x%08x src 0x%08x oif %d[%s] proto %d tos 0x%02x"
		     "fwmark 0x%x",
		     iph->daddr,
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_SRC) ? 
		      iph->saddr : 0),
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM) ?
		      rt_key.oif : -1),
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_OUT_IFNUM) ?
		      (skbp->dev ? skbp->dev->name : "none") : "none"),
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IPPROTO) ?
		      iph->protocol : -1),
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_IP4_TOS) ?
		      iph->tos : 0),
		     ((route_selector & SSH_INTERCEPTOR_ROUTE_KEY_EXTENSION) ?
		      SSH_SKB_MARK(skbp) : 0)));

	  /* Note, skb modifications are not un-done as the caller frees the 
	     skb. If this is changed then the modifications should be un-done
	     here before returning. */

	  return FALSE;
	}

      /* Make a new dst because we just rechecked the route. */
      skb_dst_set(skbp, dst_clone(&rt->u.dst));
      
      /* Release the routing table entry ; otherwise a memory leak occurs
	 in the route entry table. */
      ip_rt_put(rt);
    }
  
  SSH_ASSERT(skb_dst(skbp) != NULL);

#ifdef SSH_IPSEC_IP_ONLY_INTERCEPTOR
#ifdef LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING
  if (route_selector & SSH_INTERCEPTOR_ROUTE_KEY_FLAG_TRANSFORM_APPLIED)
    {
      /* Check if need to create a child dst_entry with interface MTU. */
      if (skb_dst(skbp)->child == NULL)
	{
          if (interceptor_route_create_child_dst(skb_dst(skbp)) == NULL)
	    {
	      SSH_DEBUG(SSH_D_ERROR,
			("Could not create child dst_entry for dst %p",
			 skb_dst(skbp)));
	      return FALSE;
	    }
	}
     
      /* Pop dst stack and use the child entry with interface MTU 
	 for sending the packet. */
      skb_dst_set(skbp, dst_pop(skb_dst(skbp)));
    }
#endif /* LINUX_FRAGMENTATION_AFTER_NF_POST_ROUTING */
#endif /* SSH_IPSEC_IP_ONLY_INTERCEPTOR */

  return TRUE;
}
Example #11
int esp6_output(struct sk_buff *skb)
{
	int err;
	int hdr_len = 0;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x  = dst->xfrm;
	struct ipv6hdr *iph = NULL, *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	u8 *prevhdr;
	u8 nexthdr = 0;

	/* First, if the skb is not checksummed, complete checksum. */
	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET6);
	if (err)
		goto error;
	err = -ENOMEM;

	/* Strip IP header in transport mode. Save it. */

	if (!x->props.mode) {
		hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
		nexthdr = *prevhdr;
		*prevhdr = IPPROTO_ESP;
		iph = kmalloc(hdr_len, GFP_ATOMIC);
		if (!iph) {
			err = -ENOMEM;
			goto error;
		}
		memcpy(iph, skb->nh.raw, hdr_len);
		__skb_pull(skb, hdr_len);
	}

	/* Now skb is pure payload to encrypt */

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		if (!x->props.mode && iph) kfree(iph);
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	if (x->props.mode) {
		iph = skb->nh.ipv6h;
		top_iph = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
		esph = (struct ipv6_esp_hdr*)(top_iph+1);
		*(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
		top_iph->version = 6;
		top_iph->priority = iph->priority;
		top_iph->flow_lbl[0] = iph->flow_lbl[0];
		top_iph->flow_lbl[1] = iph->flow_lbl[1];
		top_iph->flow_lbl[2] = iph->flow_lbl[2];
		if (x->props.flags & XFRM_STATE_NOECN)
			IP6_ECN_clear(top_iph);
		top_iph->nexthdr = IPPROTO_ESP;
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		top_iph->hop_limit = iph->hop_limit;
		ipv6_addr_copy(&top_iph->saddr,
			       (struct in6_addr *)&x->props.saddr);
		ipv6_addr_copy(&top_iph->daddr,
			       (struct in6_addr *)&x->id.daddr);
	} else { 
		esph = (struct ipv6_esp_hdr*)skb_push(skb, x->props.header_len);
		skb->h.raw = (unsigned char*)esph;
		top_iph = (struct ipv6hdr*)skb_push(skb, hdr_len);
		memcpy(top_iph, iph, hdr_len);
		kfree(iph);
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		*(u8*)(trailer->tail - 1) = nexthdr;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != sgbuf))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}