/*
 * xfrm4_rcv_encap - IPv4 IPsec input: walk the chain of SAs that protect
 * this packet, decapsulating one transform per loop iteration.
 *
 * @skb:        received packet; skb->nh.iph points at the outer IP header
 * @encap_type: UDP-encapsulation type of the *first* transform only
 *              (cleared after the first SA is processed)
 *
 * Each iteration looks up the SA by (daddr, SPI, protocol), applies the
 * transform (via the Octeon hardware engine when loaded, otherwise the
 * software x->type->input path), records it in xfrm_vec[], and re-parses
 * the SPI of the next inner header.  A tunnel-mode SA terminates the loop
 * (decaps = 1).  At most XFRM_MAX_DEPTH nested transforms are allowed.
 *
 * NOTE(review): the tail of this function (the drop/drop_unlock labels and
 * the post-loop policy handling) lies outside this hunk.  From the sibling
 * error paths, drop_unlock is assumed to release x->lock and the state
 * reference before dropping the packet — confirm against the full file.
 */
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi, seq;
	struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;
	int offset = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		/* Takes a reference on the state; released on the drop paths. */
		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY)
		/*
		 * If Octeon IPSEC Acceleration module has been loaded
		 * call it, otherwise, follow the software path
		 */
		if (cavium_ipsec_process) {
			if (xfrm_state_check_expire(x))
				goto drop_unlock;

			xfrm_vec[xfrm_nr].decap.decap_type = encap_type;

			/*
			 * Offset (from the IP header) of the field the
			 * engine needs: AH next-header or ESP SPI.
			 */
			switch (skb->nh.iph->protocol) {
			case IPPROTO_AH:
				offset = offsetof(struct ip_auth_hdr, nexthdr);
				break;
			case IPPROTO_ESP:
				offset = offsetof(struct ip_esp_hdr, spi);
				break;
			default:
				/*
				 * FIX: the original code did
				 * "return -skb->nh.iph->protocol;" here,
				 * bailing out with x->lock still held and
				 * the state reference still taken.  Use the
				 * common unlock/put error path instead.
				 */
				err = -skb->nh.iph->protocol;
				goto drop_unlock;
			}
			offset += (uint64_t)skb->data - (uint64_t)skb->nh.iph;

			/*
			 * skb->data points to the start of the esp/ah header
			 * but we require skb->data to point to the start of
			 * the ip header.
			 */
			skb_push(skb, (unsigned int)((uint64_t)skb->data -
						     (uint64_t)skb->nh.iph));

			/* The hardware engine needs a linear, private buffer. */
			if (skb_is_nonlinear(skb) &&
			    skb_linearize(skb, GFP_ATOMIC) != 0) {
				err = -ENOMEM;
				goto drop_unlock;
			}
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				err = -ENOMEM;
				goto drop_unlock;
			}

			err = cavium_ipsec_process(x->sa_handle, skb, offset,
						   0 /*DECRYPT*/);
			if (err)
				goto drop_unlock;

			/* only the first xfrm gets the encap type */
			encap_type = 0;

			x->curlft.bytes += skb->len;
			x->curlft.packets++;
			spin_unlock(&x->lock);

			xfrm_vec[xfrm_nr++].xvec = x;

			iph = skb->nh.iph;

			if (x->props.mode) {	/* tunnel mode: done */
#if 0
				if (skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					goto drop;
				/*
				 * TODO, support ECN and dscp decapsulation
				 */
				if (x->props.flags & XFRM_STATE_DECAP_DSCP)
					ipv4_copy_dscp(iph, skb->h.ipiph);
				if (!(x->props.flags & XFRM_STATE_NOECN))
					ipip_ecn_decapsulate(skb);
#endif
				skb->nh.raw = skb->data;
				memset(&(IPCB(skb)->opt), 0,
				       sizeof(struct ip_options));
				decaps = 1;
				break;
			}
		} else { /* if (cavium_ipsec_process == NULL) */
			/* Software transform path. */
			if (x->props.replay_window && xfrm_replay_check(x, seq))
				goto drop_unlock;

			if (xfrm_state_check_expire(x))
				goto drop_unlock;

			xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
			if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
				goto drop_unlock;

			/* only the first xfrm gets the encap type */
			encap_type = 0;

			if (x->props.replay_window)
				xfrm_replay_advance(x, seq);

			x->curlft.bytes += skb->len;
			x->curlft.packets++;
			spin_unlock(&x->lock);

			xfrm_vec[xfrm_nr++].xvec = x;

			iph = skb->nh.iph;

			if (x->props.mode) {	/* tunnel mode: done */
				if (iph->protocol != IPPROTO_IPIP)
					goto drop;
				if (!pskb_may_pull(skb, sizeof(struct iphdr)))
					goto drop;
				if (skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					goto drop;
				if (x->props.flags & XFRM_STATE_DECAP_DSCP)
					ipv4_copy_dscp(iph, skb->h.ipiph);
				if (!(x->props.flags & XFRM_STATE_NOECN))
					ipip_ecn_decapsulate(skb);
				skb->mac.raw = memmove(skb->data - skb->mac_len,
						       skb->mac.raw, skb->mac_len);
				skb->nh.raw = skb->data;
				memset(&(IPCB(skb)->opt), 0,
				       sizeof(struct ip_options));
				decaps = 1;
				break;
			}
		} /* if (cavium_ipsec_process) */
#else
		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
		if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++].xvec = x;

		iph = skb->nh.iph;

		if (x->props.mode) {	/* tunnel mode: done */
			if (iph->protocol != IPPROTO_IPIP)
				goto drop;
			if (!pskb_may_pull(skb, sizeof(struct iphdr)))
				goto drop;
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto drop;
			if (x->props.flags & XFRM_STATE_DECAP_DSCP)
				ipv4_copy_dscp(iph, skb->h.ipiph);
			if (!(x->props.flags & XFRM_STATE_NOECN))
				ipip_ecn_decapsulate(skb);
			skb->mac.raw = memmove(skb->data - skb->mac_len,
					       skb->mac.raw, skb->mac_len);
			skb->nh.raw = skb->data;
			memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
			decaps = 1;
			break;
		}
#endif
		/*
		 * Parse the SPI of the next (inner) protocol header; a
		 * negative return means a malformed packet, zero means
		 * another transform follows.
		 * NOTE(review): the initial parse above uses
		 * xfrm4_parse_spi() while this one uses xfrm_parse_spi() —
		 * confirm both symbols exist in this tree.
		 */
		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol,
					  &spi, &seq)) < 0)
			goto drop;
	} while (!err);
/*
 * xfrm4_output_one - apply every transform on the packet's dst/xfrm bundle
 * (legacy 2.6-era IPv4 output path).
 *
 * @skb: outbound packet; skb->dst carries the xfrm bundle.
 *
 * Returns 0 on success or a negative errno; on any error the skb is freed
 * here (callers must not touch it after a failure).  The loop applies one
 * transform per iteration, popping one dst per SA, and stops after the
 * first tunnel-mode SA (x->props.mode non-zero) since that produces a new
 * outer IP header.
 */
static int xfrm4_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	/* purpose: 0014838 author: paul.chen date: 2011-12-06 */
	/* description: Fix reboot & plug in crash for VPN G2G wildcard */
	/* Vendor workaround: refuse to transform IGMP packets entirely. */
	if(skb && skb->nh.iph && skb->nh.iph->protocol==IPPROTO_IGMP)
	{
		err = -EINVAL;
		goto error_nolock;
	}

	/*
	 * Hardware checksum offload cannot survive encryption; finish the
	 * checksum in software before transforming.
	 */
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto error_nolock;
	}

	/* Tunnel mode adds an outer header: verify the packet still fits. */
	if (x->props.mode) {
		err = xfrm4_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;
#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY)
		/*
		 * If Octeon IPSEC Acceleration module has been loaded
		 * call it, otherwise, follow the software path
		 */
		if(cavium_ipsec_process) {
			/* Hardware engine requires a linear skb. */
			if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
				err = -ENOMEM;
				goto error;
			}
			/*
			 * NOTE(review): xfrm4_encap() is skipped on this
			 * path — presumably the Octeon engine performs the
			 * encapsulation itself; confirm against the module.
			 */
			err = cavium_ipsec_process(x->sa_handle, skb, 0, 1 /*ENCRYPT*/);
		}
		else {
			xfrm4_encap(skb);
			err = x->type->output(x, skb);
		}
#else
		xfrm4_encap(skb);
		err = x->type->output(x, skb);
#endif
		if (err)
			goto error;

		/* Account traffic against the SA's current lifetime. */
		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		/* Advance to the next dst/SA in the bundle. */
		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && !x->props.mode);	/* stop after a tunnel-mode SA */

	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	/* Reached with x->lock held. */
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
/*
 * xfrm_output_one - apply the transforms on the skb's dst/xfrm bundle
 * (newer-kernel output path with async resume support).
 *
 * @skb: outbound packet; skb_dst(skb) carries the xfrm bundle.
 * @err: <= 0 means we are resuming after an asynchronous transform
 *       completed with that status; jump straight to the resume point.
 *
 * Returns 0 on success, -EINPROGRESS if the transform continues
 * asynchronously (skb ownership passes to the async completion), or a
 * negative errno with the skb freed.  XFRM MIB counters are bumped on
 * every failure branch.
 */
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		/* Ensure headroom/tailroom for the transform's expansion. */
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		/* Mode output: builds the outer header (tunnel/transport). */
		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		/* Advance the outbound replay/sequence counter. */
		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		/* Account traffic against the SA's current lifetime. */
		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);

#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY)
		/*
		 * If Octeon IPSEC Acceleration module has been loaded
		 * call it, otherwise, follow the software path
		 */
		if(cavium_ipsec_process) {
			/* Hardware engine requires a linear skb. */
			if (skb_is_nonlinear(skb) && skb_linearize(skb) != 0) {
				err = -ENOMEM;
				/*
				 * NOTE(review): this jumps to "error", which
				 * unlocks x->lock — but the lock was already
				 * released above; looks like a double-unlock,
				 * verify and consider error_nolock.
				 */
				goto error;
			}
			err = cavium_ipsec_process(x, skb, 0, 1 /*ENCRYPT*/);
		}
		else {
			err = x->type->output(x, skb);
		}
#else
		err = x->type->output(x, skb);
#endif
		if (err == -EINPROGRESS)
			goto out_exit;	/* async: completion will resume us */

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		/* Advance to the next dst/SA in the bundle. */
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	err = 0;

out_exit:
	return err;
error:
	/* Reached with x->lock held. */
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}