static int ipcomp6_tunnel_attach(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = 0;
	struct xfrm_state *t = NULL;
	__be32 spi;
	u32 mark = x->mark.m & x->mark.v;

	spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
	if (spi)
		t = xfrm_state_lookup(net, mark,
				      (xfrm_address_t *)&x->id.daddr,
				      spi, IPPROTO_IPV6, AF_INET6);
	if (!t) {
		t = ipcomp6_tunnel_create(x);
		if (!t) {
			err = -EINVAL;
			goto out;
		}
		xfrm_state_insert(t);
		xfrm_state_hold(t);
	}
	x->tunnel = t;
	atomic_inc(&t->tunnel_users);
out:
	return err;
}
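/*
 * For reference, a minimal sketch of the refcount helpers these call sites
 * pair with, in the pre-refcount_t style (atomic_t refcnt) that matches the
 * era of the snippets in this section; the exact definitions live in
 * include/net/xfrm.h and may differ slightly between kernel versions.
 */
static inline void xfrm_state_hold(struct xfrm_state *x)
{
	atomic_inc(&x->refcnt);
}

static inline void __xfrm_state_put(struct xfrm_state *x)
{
	/* Drop a reference without ever destroying the state; only safe
	 * when the caller knows another reference is still outstanding. */
	atomic_dec(&x->refcnt);
}

static inline void xfrm_state_put(struct xfrm_state *x)
{
	/* The last reference frees the state. */
	if (atomic_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x);
}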
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[skb->sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;

	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
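/*
 * The mirror-image release path: every xfrm_state_hold() taken by
 * secpath_dup() is dropped when the sec_path itself is freed. A sketch of
 * the matching helpers from the same atomic_t-refcnt era (see
 * net/xfrm/xfrm_input.c and include/net/xfrm.h):
 */
void __secpath_destroy(struct sec_path *sp)
{
	int i;

	/* Put the per-entry state references taken in secpath_dup(). */
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}

static inline void secpath_put(struct sec_path *sp)
{
	if (sp && atomic_dec_and_test(&sp->refcnt))
		__secpath_destroy(sp);
}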
/*
 * Must be protected by xfrm_cfg_mutex.  State and tunnel user references are
 * always incremented on success.
 */
static int ipcomp_tunnel_attach(struct xfrm_state *x)
{
	int err = 0;
	struct xfrm_state *t;

	t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
			      x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
	if (!t) {
		t = ipcomp_tunnel_create(x);
		if (!t) {
			err = -EINVAL;
			goto out;
		}
		xfrm_state_insert(t);
		xfrm_state_hold(t);
	}
	x->tunnel = t;
	atomic_inc(&t->tunnel_users);
out:
	return err;
}
/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();
	return ret;
}
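/*
 * Hypothetical caller sketch (example_rx_lookup is not a real driver
 * function): the reference taken inside ixgbe_ipsec_find_rx_state() belongs
 * to the caller, which either parks it in the skb's sec_path, as the real
 * ixgbe rx path does, or must drop it with xfrm_state_put().
 */
static void example_rx_lookup(struct ixgbe_ipsec *ipsec, struct sk_buff *skb,
			      __be32 *daddr, u8 proto, __be32 spi, bool ip4)
{
	struct xfrm_state *xs;
	struct sec_path *sp;

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, ip4);
	if (!xs)
		return;

	sp = secpath_dup(skb->sp);
	if (!sp) {
		xfrm_state_put(xs);	/* don't leak the lookup reference */
		return;
	}
	if (skb->sp)
		secpath_put(skb->sp);
	skb->sp = sp;

	/* Transfer the lookup reference to the sec_path entry; it is
	 * released later via secpath_put() -> __secpath_destroy(). */
	sp->xvec[sp->len++] = xs;
}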
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, true);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);
#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	int xfrm_nr = 0;
	int i;
#endif

	if (err <= 0)
		goto resume;

	do {
#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
		if (x->offloaded) {
			if (xfrm_nr == XFRM_MAX_DEPTH) {
				err = -ENOBUFS;
				goto out_exit;
			}
			if (!x->curlft.use_time)
				x->curlft.use_time = get_seconds();
			xfrm_vec[xfrm_nr++] = x;
			skb->ipsec_offload = 1;
			goto next_dst;
		}
#endif
		err = xfrm_state_check_space(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);
		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);

		err = x->type->output(x, skb);
		if (err == -EINPROGRESS)
			goto out_exit;

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
next_dst:
#endif
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	err = 0;

#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			err = -ENOMEM;
			goto error_nolock;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH) {
		err = -ENOBUFS;
		goto error_nolock;
	}

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;

	for (i = 0; i < skb->sp->len; i++)
		xfrm_state_hold(skb->sp->xvec[i]);
#endif

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
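/*
 * Where the per-packet references finally drop: when the skb is freed, the
 * core releases its sec_path, which in turn puts every state held above.
 * Abbreviated sketch of the relevant teardown in net/core/skbuff.c from the
 * same era (destructor and conntrack handling elided):
 */
static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	/* Drops the sec_path reference; at zero this puts each xvec state. */
	secpath_put(skb->sp);
#endif
	/* ... skb->destructor and conntrack teardown elided ... */
}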