/*
 * xfrm4_output_one - run the IPv4 xfrm (IPsec) transform chain on one skb.
 *
 * Walks the dst/xfrm stack attached to @skb, applying each transport-mode
 * state in turn; the loop exits once a tunnel-mode state (x->props.mode
 * nonzero) has been applied or the chain ends.
 *
 * Returns 0 on success.  On any failure the skb is freed here
 * (callers must NOT free it again) and a negative errno is returned.
 *
 * Locking: x->lock is held around the state check, the actual transform
 * and the lifetime accounting; every "goto error" target below assumes
 * the lock is HELD, every "goto error_nolock" assumes it is NOT.
 */
static int xfrm4_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	err = skb_checksum_setup(skb);
	if (err)
		goto error_nolock;

	/*
	 * Hardware checksum offload cannot survive the transform (the
	 * payload is about to be encapsulated/encrypted), so complete
	 * the checksum in software first.
	 */
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto error_nolock;
	}

	/* Tunnel mode adds an outer header: verify the result still fits. */
	if (x->props.mode) {
		err = xfrm4_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);

		/* Validate state (lifetime/replay etc.) under the lock. */
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;

		xfrm4_encap(skb);

		err = x->type->output(x, skb);
		if (err)
			goto error;

		/* Account transformed bytes/packets against SA lifetime. */
		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		/*
		 * Advance to the next dst in the chain; dst_pop() failing
		 * means there is nowhere left to route the packet.
		 */
		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && !x->props.mode);	/* stop after a tunnel-mode state */

	/* Mark the skb so the output path knows xfrm already ran on it. */
	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);	/* reached only with x->lock held */
error_nolock:
	kfree_skb(skb);			/* error path owns and frees the skb */
	goto out_exit;
}
/*
 * xfrm4_output - apply a single IPv4 xfrm transform and hand the skb on.
 *
 * Single-shot variant (no loop over the dst chain): checks the state,
 * encapsulates, runs the transform, charges SA lifetime counters, then
 * pops the current dst so the packet continues down the output path.
 *
 * @pskb is a pointer-to-pointer because skb_checksum_help() and
 * x->type->output() may reallocate the skb; *pskb is reloaded after
 * each such call and must be used by the caller afterwards.
 *
 * Returns NET_XMIT_BYPASS on success.  On failure the skb is freed here
 * and a negative errno is returned.
 *
 * Locking: "goto error" targets run with x->lock held;
 * "goto error_nolock" targets run without it.
 */
int xfrm4_output(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	/*
	 * The transform will mangle the payload, so any pending hardware
	 * checksum must be completed in software first.  Reload *pskb:
	 * skb_checksum_help() may have replaced the skb.
	 */
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(pskb, 0);
		skb = *pskb;
		if (err)
			goto error_nolock;
	}

	spin_lock_bh(&x->lock);

	/* Validate SA (lifetime/replay etc.) under the state lock. */
	err = xfrm_state_check(x, skb);
	if (err)
		goto error;

	/* Tunnel mode adds an outer header: verify the result still fits. */
	if (x->props.mode) {
		err = xfrm4_tunnel_check_size(skb);
		if (err)
			goto error;
	}

	xfrm4_encap(skb);

	/* Transform may reallocate the skb; pick up the new pointer. */
	err = x->type->output(pskb);
	skb = *pskb;
	if (err)
		goto error;

	/* Account transformed bytes/packets against SA lifetime. */
	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock_bh(&x->lock);

	/* Replace skb->dst with the next hop; failure means no route on. */
	if (!(skb->dst = dst_pop(dst))) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}

	/* NET_XMIT_BYPASS tells the caller the packet was consumed here. */
	err = NET_XMIT_BYPASS;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);	/* reached only with x->lock held */
error_nolock:
	kfree_skb(skb);			/* error path owns and frees the skb */
	goto out_exit;
}
/*
 * xfrm4_output_one - run the IPv4 xfrm (IPsec) transform chain on one skb
 * (vendor variant: IGMP filtering + optional Cavium Octeon crypto offload).
 *
 * Walks the dst/xfrm stack attached to @skb, applying each transport-mode
 * state in turn; the loop exits once a tunnel-mode state (x->props.mode
 * nonzero) has been applied or the chain ends.
 *
 * Returns 0 on success.  On any failure the skb is freed here
 * (callers must NOT free it again) and a negative errno is returned.
 *
 * Locking: every "goto error" target below assumes x->lock is HELD,
 * every "goto error_nolock" assumes it is NOT.
 */
static int xfrm4_output_one(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x = dst->xfrm;
	int err;

	/* purpose: 0014838 author: paul.chen date: 2011-12-06 */
	/* description: Fix reboot & plug in crash for VPN G2G wildcard */
	/*
	 * Drop IGMP packets before they enter the transform path.
	 * NOTE(review): the skb != NULL test is vacuous — skb->dst was
	 * already dereferenced above — but is kept as-is.
	 */
	if(skb && skb->nh.iph && skb->nh.iph->protocol==IPPROTO_IGMP)
	{
		err = -EINVAL;
		goto error_nolock;
	}

	/*
	 * Hardware checksum offload cannot survive the transform, so
	 * complete the checksum in software first.
	 */
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto error_nolock;
	}

	/* Tunnel mode adds an outer header: verify the result still fits. */
	if (x->props.mode) {
		err = xfrm4_tunnel_check_size(skb);
		if (err)
			goto error_nolock;
	}

	do {
		spin_lock_bh(&x->lock);

		/* Validate state (lifetime/replay etc.) under the lock. */
		err = xfrm_state_check(x, skb);
		if (err)
			goto error;

#if defined(CONFIG_CAVIUM_OCTEON_IPSEC) && defined(CONFIG_NET_KEY)
		/*
		 * If Octeon IPSEC Acceleration module has been loaded
		 * call it, otherwise, follow the software path
		 */
		if(cavium_ipsec_process)
		{
			/*
			 * The hardware engine needs a linear buffer;
			 * linearize fragmented skbs before handing off.
			 */
			if (skb_is_nonlinear(skb) &&
			    skb_linearize(skb, GFP_ATOMIC) != 0) {
				err = -ENOMEM;
				goto error;
			}
			err = cavium_ipsec_process(x->sa_handle, skb, 0, 1 /*ENCRYPT*/);
		}
		else
		{
			/* Software fallback: same path as the stock kernel. */
			xfrm4_encap(skb);
			err = x->type->output(x, skb);
		}
#else
		xfrm4_encap(skb);
		err = x->type->output(x, skb);
#endif
		if (err)
			goto error;

		/* Account transformed bytes/packets against SA lifetime. */
		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		/*
		 * Advance to the next dst in the chain; dst_pop() failing
		 * means there is nowhere left to route the packet.
		 */
		if (!(skb->dst = dst_pop(dst))) {
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		dst = skb->dst;
		x = dst->xfrm;
	} while (x && !x->props.mode);	/* stop after a tunnel-mode state */

	/* Mark the skb so the output path knows xfrm already ran on it. */
	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);	/* reached only with x->lock held */
error_nolock:
	kfree_skb(skb);			/* error path owns and frees the skb */
	goto out_exit;
}