int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
{
	int err;
	__be32 seq;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;
	int nexthdr;
	unsigned int nhoff;

	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
		goto drop;

	do {
		/* ipv6_hdr() is the accessor that matches the
		 * skb_network_header() API used above. */
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      nexthdr != IPPROTO_IPIP ? nexthdr : IPPROTO_IPV6,
				      AF_INET6);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		nexthdr = x->type->input(x, skb);
		if (nexthdr <= 0)
			goto drop_unlock;

		skb_network_header(skb)[nhoff] = nexthdr;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode == XFRM_MODE_TUNNEL) { /* XXX */
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;
	skb->ip_summed = CHECKSUM_NONE;

	nf_reset(skb);

	if (decaps) {
		dst_release(skb->dst);
		skb->dst = NULL;
		netif_rx(skb);
		return -1;
	} else {
#ifdef CONFIG_NETFILTER
		ipv6_hdr(skb)->payload_len = htons(skb->len);
		__skb_push(skb, skb->data - skb_network_header(skb));

		NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
			ip6_rcv_finish);
		return -1;
#else
		return 1;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);

	kfree_skb(skb);
	return -1;
}
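/*
 * A minimal sketch of the non-encapsulated entry point, assuming the
 * xfrm6_rcv() wrapper of this kernel generation: passing a zero SPI
 * tells xfrm6_rcv_spi() to parse the SPI out of the packet itself via
 * xfrm_parse_spi().
 */
int xfrm6_rcv(struct sk_buff *skb)
{
	return xfrm6_rcv_spi(skb, 0);
}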
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi, seq;
	struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
		if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++].xvec = x;

		iph = skb->nh.iph;

		if (x->props.mode) {
			if (iph->protocol != IPPROTO_IPIP)
				goto drop;
			if (!pskb_may_pull(skb, sizeof(struct iphdr)))
				goto drop;
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto drop;
			if (!(x->props.flags & XFRM_STATE_NOECN))
				ipip_ecn_decapsulate(skb);
			skb->mac.raw = memmove(skb->data - skb->mac_len,
					       skb->mac.raw, skb->mac_len);
			skb->nh.raw = skb->data;
			memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->x + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(struct sec_decap_state));
	skb->sp->len += xfrm_nr;

	if (decaps) {
		if (!(skb->dev->flags & IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return 0;
	} else {
		return -skb->nh.iph->protocol;
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr].xvec);

	kfree_skb(skb);
	return 0;
}
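/*
 * For reference, a sketch of the per-packet state this generation
 * threads through the loop, assuming the 2.6.12-era definitions from
 * include/net/xfrm.h: each decapsulation step records both the
 * xfrm_state used and how the packet was encapsulated, which is why
 * the memcpy above copies sec_decap_state entries into skb->sp->x.
 */
struct sec_decap_state {
	struct xfrm_state	*xvec;
	struct xfrm_decap_state	decap;
};

struct sec_path {
	atomic_t		refcnt;
	int			len;
	struct sec_decap_state	x[XFRM_MAX_DEPTH];
};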
static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
	int hdr_len = skb->h.raw - skb->nh.raw;
	int nfrags;
	unsigned char *tmp_hdr = NULL;
	int ret = 0;

	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	esph = (struct ipv6_esp_hdr *)skb->data;

	if (elen <= 0 || (elen & (blksize-1))) {
		ret = -EINVAL;
		goto out_nofree;
	}

	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
	if (!tmp_hdr) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	memcpy(tmp_hdr, skb->nh.raw, hdr_len);

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no)) {
			ret = -EINVAL;
			goto out;
		}

		esp->auth.icv(esp, skb, 0, skb->len - alen, sum);

		if (skb_copy_bits(skb, skb->len - alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			ret = -EINVAL;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.ipv6h;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data,
				     crypto_tfm_alg_ivsize(esp->conf.tfm));

	{
		u8 nexthdr[2];
		struct scatterlist *sg = &esp->sgbuf[0];
		u8 padlen;

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
			if (!sg) {
				ret = -ENOMEM;
				goto out;
			}
		}
		skb_to_sgvec(skb, sg,
			     sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen,
			     elen);
		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);

		if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
			BUG();

		padlen = nexthdr[0];
		if (padlen + 2 >= elen) {
			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n",
				       padlen + 2, elen);
			ret = -EINVAL;
			goto out;
		}

		/* ... check padding bits here. Silly. :-) */

		pskb_trim(skb, skb->len - alen - padlen - 2);
		skb->h.raw = skb_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen);
		skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
		memcpy(skb->nh.raw, tmp_hdr, hdr_len);
		skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
		ret = nexthdr[1];
	}

out:
	kfree(tmp_hdr);
out_nofree:
	return ret;
}
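/*
 * The "check padding bits" step above is deliberately skipped in this
 * version. A minimal sketch of what such a check could look like,
 * assuming RFC 2406 self-describing padding (bytes 1, 2, 3, ...,
 * padlen); the helper name esp_check_padding() is hypothetical, not a
 * kernel API. It would run after decryption, before the trim.
 */
static int esp_check_padding(struct sk_buff *skb, int alen, u8 padlen)
{
	u8 pad[255];
	int i;

	/* Padding sits immediately before the pad-length/next-header pair. */
	if (skb_copy_bits(skb, skb->len - alen - 2 - padlen, pad, padlen))
		return -EINVAL;

	for (i = 0; i < padlen; i++)
		if (pad[i] != i + 1)
			return -EINVAL;	/* corrupt or forged padding */

	return 0;
}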
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	__be32 spi, seq;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if ((x->encap ? x->encap->encap_type : 0) != encap_type)
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		if (x->type->input(x, skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode == XFRM_MODE_TUNNEL) {
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;

	nf_reset(skb);

	if (decaps) {
		if (!(skb->dev->flags & IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return 0;
	} else {
#ifdef CONFIG_NETFILTER
		__skb_push(skb, skb->data - skb->nh.raw);
		skb->nh.iph->tot_len = htons(skb->len);
		ip_send_check(skb->nh.iph);

		NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
			xfrm4_rcv_encap_finish);
		return 0;
#else
		return -skb->nh.iph->protocol;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);

	kfree_skb(skb);
	return 0;
}
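/*
 * A minimal sketch of the plain (non-UDP-encapsulated) entry point for
 * this generation, assuming the usual xfrm4_rcv() wrapper: an
 * encap_type of 0 means "no NAT-T encapsulation", so the state-lookup
 * loop above requires the matched SA to carry no encap template.
 */
int xfrm4_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_encap(skb, 0);
}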
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup_with_mark(net, skb->mark, daddr, spi,
						nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		XFRM_SKB_CB(skb)->seq.input = seq;

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
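/*
 * A minimal sketch of how a per-family receive path feeds this
 * family-independent xfrm_input(), assuming the later xfrm4_rcv_encap()
 * of the same generation (note its signature differs from the older
 * variants elsewhere in this section): the caller records where the
 * destination address lives in the outer header before handing off.
 */
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	return xfrm_input(skb, nexthdr, spi, encap_type);
}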
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi, seq;
	struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;
	int offset = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY)
		/*
		 * If the Octeon IPSEC acceleration module has been loaded,
		 * call it; otherwise, follow the software path.
		 */
		if (cavium_ipsec_process) {
			if (xfrm_state_check_expire(x))
				goto drop_unlock;

			xfrm_vec[xfrm_nr].decap.decap_type = encap_type;

			switch (skb->nh.iph->protocol) {
			case IPPROTO_AH:
				offset = offsetof(struct ip_auth_hdr, nexthdr);
				break;
			case IPPROTO_ESP:
				offset = offsetof(struct ip_esp_hdr, spi);
				break;
			default:
				/* Not ours: release the state lock and
				 * reference before punting to the next
				 * protocol handler (the original returned
				 * with the lock still held). */
				spin_unlock(&x->lock);
				xfrm_state_put(x);
				return -skb->nh.iph->protocol;
			}
			offset += (uint64_t)skb->data - (uint64_t)skb->nh.iph;

			/*
			 * skb->data points to the start of the esp/ah header
			 * but we require skb->data to point to the start of
			 * the ip header.
			 */
			skb_push(skb, (unsigned int)((uint64_t)skb->data -
						     (uint64_t)skb->nh.iph));

			if (skb_is_nonlinear(skb) &&
			    skb_linearize(skb, GFP_ATOMIC) != 0) {
				err = -ENOMEM;
				goto drop_unlock;
			}
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				err = -ENOMEM;
				goto drop_unlock;
			}

			err = cavium_ipsec_process(x->sa_handle, skb, offset,
						   0 /*DECRYPT*/);
			if (err)
				goto drop_unlock;

			/* only the first xfrm gets the encap type */
			encap_type = 0;

			x->curlft.bytes += skb->len;
			x->curlft.packets++;

			spin_unlock(&x->lock);

			xfrm_vec[xfrm_nr++].xvec = x;

			iph = skb->nh.iph;

			if (x->props.mode) {
#if 0
				if (skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					goto drop;

				/*
				 * TODO: support ECN and dscp decapsulation
				 */
				if (x->props.flags & XFRM_STATE_DECAP_DSCP)
					ipv4_copy_dscp(iph, skb->h.ipiph);
				if (!(x->props.flags & XFRM_STATE_NOECN))
					ipip_ecn_decapsulate(skb);
#endif
				skb->nh.raw = skb->data;
				memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
				decaps = 1;
				break;
			}
		} else { /* if (cavium_ipsec_process == NULL) */
			if (x->props.replay_window && xfrm_replay_check(x, seq))
				goto drop_unlock;

			if (xfrm_state_check_expire(x))
				goto drop_unlock;

			xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
			if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
				goto drop_unlock;

			/* only the first xfrm gets the encap type */
			encap_type = 0;

			if (x->props.replay_window)
				xfrm_replay_advance(x, seq);

			x->curlft.bytes += skb->len;
			x->curlft.packets++;

			spin_unlock(&x->lock);

			xfrm_vec[xfrm_nr++].xvec = x;

			iph = skb->nh.iph;

			if (x->props.mode) {
				if (iph->protocol != IPPROTO_IPIP)
					goto drop;
				if (!pskb_may_pull(skb, sizeof(struct iphdr)))
					goto drop;
				if (skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					goto drop;
				if (x->props.flags & XFRM_STATE_DECAP_DSCP)
					ipv4_copy_dscp(iph, skb->h.ipiph);
				if (!(x->props.flags & XFRM_STATE_NOECN))
					ipip_ecn_decapsulate(skb);
				skb->mac.raw = memmove(skb->data - skb->mac_len,
						       skb->mac.raw, skb->mac_len);
				skb->nh.raw = skb->data;
				memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
				decaps = 1;
				break;
			}
		} /* if (cavium_ipsec_process) */
#else
		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
		if (x->type->input(x,
				   &(xfrm_vec[xfrm_nr].decap), skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++].xvec = x;

		iph = skb->nh.iph;

		if (x->props.mode) {
			if (iph->protocol != IPPROTO_IPIP)
				goto drop;
			if (!pskb_may_pull(skb, sizeof(struct iphdr)))
				goto drop;
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto drop;
			if (x->props.flags & XFRM_STATE_DECAP_DSCP)
				ipv4_copy_dscp(iph, skb->h.ipiph);
			if (!(x->props.flags & XFRM_STATE_NOECN))
				ipip_ecn_decapsulate(skb);
			skb->mac.raw = memmove(skb->data - skb->mac_len,
					       skb->mac.raw, skb->mac_len);
			skb->nh.raw = skb->data;
			memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
			decaps = 1;
			break;
		}
#endif

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
			goto drop;
	} while (!err);
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags;
	int encap_len = 0;
	u8 nexthdr[2];
	struct scatterlist *sg;
	u8 workbuf[60];
	int padlen;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	esph = (struct ip_esp_hdr *)skb->data;

	if (elen <= 0 || (elen & (blksize-1)))
		goto out;

	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[esp->auth.icv_full_len];
		u8 sum1[alen];

		if (x->props.replay_window && xfrm_replay_check(x, esph->seq_no))
			goto out;

		esp->auth.icv(esp, skb, 0, skb->len - alen, sum);

		if (skb_copy_bits(skb, skb->len - alen, sum1, alen))
			BUG();

		if (unlikely(memcmp(sum, sum1, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}

		if (x->props.replay_window)
			xfrm_replay_advance(x, esph->seq_no);
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	iph = skb->nh.iph;

	/* Get ivec. This can be wrong, check against another impls. */
	if (esp->conf.ivlen)
		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data,
				     crypto_tfm_alg_ivsize(esp->conf.tfm));

	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen + 2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;

		if (encap->encap_type != decap->decap_type)
			goto out;

		uh = (struct udphdr *)(iph + 1);
		encap_len = (void *)esph - (void *)uh;

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (!x->props.mode)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	memcpy(workbuf, skb->nh.raw, iph->ihl * 4);
	skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen);
	skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	memcpy(skb->nh.raw, workbuf, iph->ihl * 4);
	skb->nh.iph->tot_len = htons(skb->len);

	return 0;

out:
	return -EINVAL;
}
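/*
 * For reference, the NAT-T encapsulation types compared above, as the
 * 2.6-era include/linux/udp.h defines them; xfrm4_rcv_encap() stamps
 * the same values into decap->decap_type, so the encap_type check
 * rejects packets whose UDP encapsulation does not match the SA's
 * negotiated template.
 */
#define UDP_ENCAP_ESPINUDP_NON_IKE	1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
#define UDP_ENCAP_ESPINUDP		2 /* draft-ietf-ipsec-nat-t-ike-02/06 */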
static int ah_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
	int ah_hlen;
	struct iphdr *iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	char work_buf[60];

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;

	if (x->props.replay_window && xfrm_replay_check(x, ah->seq_no))
		goto out;

	ahp = x->data;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = skb->nh.iph;

	memcpy(work_buf, iph, iph->ihl * 4);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (iph->ihl != 5) {
		u32 dummy;
		if (ip_clear_mutable_options(iph, &dummy))
			goto out;
	}
	{
		u8 auth_data[MAX_AH_AUTH_LEN];

		memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
		skb_push(skb, skb->data - skb->nh.raw);
		ahp->icv(ahp, skb, ah->auth_data);
		if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if (x->props.replay_window)
		xfrm_replay_advance(x, ah->seq_no);

	((struct iphdr *)work_buf)->protocol = ah->nexthdr;
	skb->nh.raw = skb_pull(skb, ah_hlen);
	memcpy(skb->nh.raw, work_buf, iph->ihl * 4);
	skb->nh.iph->tot_len = htons(skb->len);
	skb_pull(skb, skb->nh.iph->ihl * 4);
	skb->h.raw = skb->data;

	return 0;

out:
	return -EINVAL;
}
int xfrm6_input_resume(struct sk_buff *skb, int nexthdr)
{
	int err;
	__be32 spi;	/* xfrm_parse_spi() expects a big-endian SPI, not int */
	u32 seq;
	struct xfrm_state *x;
	int decaps = 0;
	unsigned int nhoff;

	nhoff = IP6CB(skb)->nhoff;

	x = skb->sp->xvec[skb->sp->len - 1];
	seq = XFRM_SKB_CB(skb)->seq;
	spin_lock(&x->lock);
	goto resume;

	do {
		struct ipv6hdr *iph = skb->nh.ipv6h;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      nexthdr, AF_INET6);
		if (x == NULL) {
			xfrm_naudit_state_notfound(skb, AF_INET6, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq)) {
			xfrm_naudit_state_replay(x, skb, seq);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		XFRM_SKB_CB(skb)->seq = seq;

		nexthdr = x->type->input(x, skb);

resume:
		if (nexthdr <= 0)
			goto drop_unlock;

		skb->nh.raw[nhoff] = nexthdr;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode) { /* XXX */
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	skb->ip_summed = CHECKSUM_NONE;

	nf_reset(skb);

	if (decaps) {
		if (!(skb->dev->flags & IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return -1;
	} else {
		skb->nh.ipv6h->payload_len = htons(skb->len);
		__skb_push(skb, skb->data - skb->nh.raw);

		NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
			dst_input);
		return -1;
	}

drop_unlock:
	spin_unlock(&x->lock);
	if (nexthdr == -EINPROGRESS)
		return -1;
drop:
	kfree_skb(skb);
	return -1;
}