void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		if (in_irq())
			printk(KERN_WARNING "Warning: kfree_skb on "
			       "hard IRQ %p\n", NET_CALLER(skb));
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif

	kfree_skbmem(skb);
}
static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		/* WARN_ON(in_irq()); */
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}
void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
	kfree_skbmem(skb);
}
void __kfree_skb(struct sk_buff *skb)
{
	BUG_ON(skb->list != NULL);

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif
	kfree_skbmem(skb);
}
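All four free-path variants above honor the same teardown contract: release the attached state (route cache entry, secpath, conntrack and bridge-netfilter references), run the optional destructor callback exactly once while the buffer is still valid, then free the memory. They differ mainly in how misuse is policed: the skb->list printk-plus-BUG() became BUG_ON(skb->list != NULL), and the in-IRQ printk became WARN_ON(in_irq()). The following minimal userspace model of that contract is an editorial sketch, not kernel code; the names buf, buf_free and note_freed are hypothetical.

#include <stdlib.h>

struct buf {
	void (*destructor)(struct buf *b);	/* optional, may be NULL */
	void *payload;				/* stands in for dst/secpath/conntrack state */
};

static void buf_free(struct buf *b)
{
	free(b->payload);			/* 1. drop attached state first */
	b->payload = NULL;

	if (b->destructor)			/* 2. owner callback, exactly once, */
		b->destructor(b);		/*    while the struct is still valid */

	free(b);				/* 3. finally release the container */
}

static void note_freed(struct buf *b)
{
	(void)b;				/* e.g. update accounting, wake a waiter */
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->destructor = note_freed;
	b->payload = malloc(16);
	buf_free(b);
	return 0;
}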
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[skb->sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_state *x = NULL;
	int i = 0;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (1 + skb->sp->len == XFRM_MAX_DEPTH) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
		goto drop;
	}

	for (i = 0; i < 3; i++) {
		xfrm_address_t *dst, *src;

		switch (i) {
		case 0:
			dst = daddr;
			src = saddr;
			break;
		case 1:
			/* lookup state with wild-card source address */
			dst = daddr;
			src = (xfrm_address_t *)&in6addr_any;
			break;
		default:
			/* lookup state with wild-card addresses */
			dst = (xfrm_address_t *)&in6addr_any;
			src = (xfrm_address_t *)&in6addr_any;
			break;
		}

		x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
		if (!x)
			continue;

		spin_lock(&x->lock);

		if ((!i || (x->props.flags & XFRM_STATE_WILDRECV)) &&
		    likely(x->km.state == XFRM_STATE_VALID) &&
		    !xfrm_state_check_expire(x)) {
			spin_unlock(&x->lock);
			if (x->type->input(x, skb) > 0) {
				/* found a valid state */
				break;
			}
		} else
			spin_unlock(&x->lock);

		xfrm_state_put(x);
		x = NULL;
	}

	if (!x) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
		xfrm_audit_state_notfound_simple(skb, AF_INET6);
		goto drop;
	}

	skb->sp->xvec[skb->sp->len++] = x;

	spin_lock(&x->lock);

	x->curlft.bytes += skb->len;
	x->curlft.packets++;

	spin_unlock(&x->lock);

	return 1;

drop:
	return -1;
}
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if ((x->encap ? x->encap->encap_type : 0) != encap_type)
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		if (x->type->input(x, skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode == XFRM_MODE_TUNNEL) {
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;

	nf_reset(skb);

	if (decaps) {
		if (!(skb->dev->flags&IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return 0;
	} else {
#ifdef CONFIG_NETFILTER
		__skb_push(skb, skb->data - skb->nh.raw);
		skb->nh.iph->tot_len = htons(skb->len);
		ip_send_check(skb->nh.iph);

		NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
			xfrm4_rcv_encap_finish);
		return 0;
#else
		return -skb->nh.iph->protocol;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);

	kfree_skb(skb);
	return 0;
}
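Nearly every receive path in this section repeats the "allocate new secpath or COW existing one" idiom: if the skb's sec_path is shared (refcount other than 1), duplicate it, drop the reference to the shared copy, and only then append decapsulated states to the now-private copy. The following self-contained model of that copy-on-write step is an editorial sketch under those assumptions; path, path_dup, path_put and path_cow are hypothetical names, not kernel APIs.

#include <stdlib.h>
#include <string.h>

#define MAX_DEPTH 6

struct path {
	int refcnt;
	int len;
	void *vec[MAX_DEPTH];
};

static struct path *path_dup(const struct path *old)
{
	struct path *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	if (old)
		memcpy(p, old, sizeof(*p));	/* copies len and vec[] */
	else
		p->len = 0;
	p->refcnt = 1;				/* the copy starts out private */
	return p;
}

static void path_put(struct path *p)
{
	if (p && --p->refcnt == 0)
		free(p);
}

/* Ensure *pp points at a private, writable path; -1 on allocation failure. */
static int path_cow(struct path **pp)
{
	struct path *p;

	if (*pp && (*pp)->refcnt == 1)
		return 0;			/* already exclusive, nothing to do */
	p = path_dup(*pp);
	if (!p)
		return -1;
	path_put(*pp);				/* drop the shared reference */
	*pp = p;
	return 0;
}

int main(void)
{
	struct path *shared = path_dup(NULL);

	if (!shared)
		return 1;
	shared->refcnt++;			/* simulate a second owner */

	struct path *sp = shared;

	if (path_cow(&sp) == 0)
		sp->vec[sp->len++] = "state";	/* private copy: other owner unaffected */

	path_put(sp);
	path_put(shared);
	return 0;
}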
/*
 * This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
int ipsec_mast_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipsec_xmit_state *ixs;
	IPsecSAref_t SAref;

	KLIPS_PRINT(debug_mast, "klips_debug:ipsec_mast_start_xmit: skb=%p\n", skb);
	if(skb == NULL) {
		printk("ipsec_mast_start_xmit: "
		       "passed NULL\n");
		return 0;
	}

	ixs = ipsec_xmit_state_new(dev);
	if(ixs == NULL)
		return NETDEV_TX_BUSY;

	ixs->dev = dev;
	ixs->skb = skb;
	SAref = 0;
#ifdef NETDEV_25
#if defined(CONFIG_NETFILTER)
	if(skb->nfmark & IPSEC_NFMARK_IS_SAREF_BIT) {
		SAref = NFmark2IPsecSAref(skb->nfmark);
		KLIPS_PRINT(debug_mast,
			    "klips_debug:ipsec_mast_start_xmit: "
			    "getting SAref=%d from nfmark\n",
			    SAref);
	}
#endif
#endif

#ifdef CONFIG_INET_IPSEC_SAREF
	if(skb->sp && skb->sp->ref != IPSEC_SAREF_NULL) {
		SAref = skb->sp->ref;
		KLIPS_PRINT(debug_mast,
			    "klips_debug:ipsec_mast_start_xmit: "
			    "getting SAref=%d from sec_path\n",
			    SAref);
	}
#endif

	if (ipsec_xmit_sanity_check_skb(ixs) != IPSEC_XMIT_OK) {
		ipsec_xmit_cleanup(ixs);
		ipsec_xmit_state_delete(ixs);
		return 0;
	}

	ixs->ipsp = ipsec_sa_getbyref(SAref, IPSEC_REFOTHER);
	if(ixs->ipsp == NULL) {
		KLIPS_ERROR(debug_mast,
			    "klips_debug:ipsec_mast_start_xmit: "
			    "%s: no SA for saref=%d\n",
			    dev->name, SAref);
		ipsec_xmit_cleanup(ixs);
		ipsec_xmit_state_delete(ixs);
		return 0;
	}

	/* make sure this packet can go out on this SA */
	if (ipsec_mast_check_outbound_policy(ixs)) {
		ipsec_xmit_cleanup(ixs);
		ipsec_xmit_state_delete(ixs);
		return 0;
	}

	/* fill in outgoing_said using the ipsp we have */
	ixs->outgoing_said = ixs->ipsp->ips_said;

#ifdef NETDEV_25
#if defined(CONFIG_NETFILTER)
	/* prevent recursion through the saref route */
	if(skb->nfmark & 0x80000000) {
		skb->nfmark = 0;
	}
#endif
#endif

#if 0
	/* TODO: do we also have to do this? */
	if(skb->sp && skb->sp->ref != IPSEC_SAREF_NULL) {
		secpath_put(skb->sp);
		skb->sp = NULL;
	}
#endif

	/*
	 * we should be calculating the MTU by looking up a route
	 * based upon the destination in the SA, and then cache
	 * it into the SA, but we don't do that right now.
	 */
	ixs->cur_mtu = 1460;
	ixs->physmtu = 1460;

	ixs->mast_mode = 1;
	ixs->xsm_complete = ipsec_mast_xsm_complete;
	ixs->state = IPSEC_XSM_INIT2;	/* we start later in the process */

	ixs->prv = netdev_priv(ixs->dev);
	ixs->stats = (struct net_device_stats *) &(ixs->prv->mystats);

	ipsec_xsm(ixs);
	return 0;
}
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	int err;
	u32 spi, seq;
	struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;

	if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
		goto drop;

	do {
		struct iphdr *iph = skb->nh.iph;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      iph->protocol, AF_INET);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		xfrm_vec[xfrm_nr].decap.decap_type = encap_type;
		if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
			goto drop_unlock;

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++].xvec = x;

		iph = skb->nh.iph;

		if (x->props.mode) {
			if (iph->protocol != IPPROTO_IPIP)
				goto drop;
			if (!pskb_may_pull(skb, sizeof(struct iphdr)))
				goto drop;
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto drop;
			if (!(x->props.flags & XFRM_STATE_NOECN))
				ipip_ecn_decapsulate(skb);
			skb->mac.raw = memmove(skb->data - skb->mac_len,
					       skb->mac.raw, skb->mac_len);
			skb->nh.raw = skb->data;
			memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->x+skb->sp->len, xfrm_vec,
	       xfrm_nr*sizeof(struct sec_decap_state));
	skb->sp->len += xfrm_nr;

	if (decaps) {
		if (!(skb->dev->flags&IFF_LOOPBACK)) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		netif_rx(skb);
		return 0;
	} else {
		return -skb->nh.iph->protocol;
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr].xvec);

	kfree_skb(skb);
	return 0;
}
int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
{
	int err;
	__be32 seq;
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	struct xfrm_state *x;
	int xfrm_nr = 0;
	int decaps = 0;
	int nexthdr;
	unsigned int nhoff;

	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
		goto drop;

	do {
		struct ipv6hdr *iph = skb->nh.ipv6h;

		if (xfrm_nr == XFRM_MAX_DEPTH)
			goto drop;

		x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi,
				      nexthdr != IPPROTO_IPIP ? nexthdr : IPPROTO_IPV6,
				      AF_INET6);
		if (x == NULL)
			goto drop;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID))
			goto drop_unlock;

		if (x->props.replay_window && xfrm_replay_check(x, seq))
			goto drop_unlock;

		if (xfrm_state_check_expire(x))
			goto drop_unlock;

		nexthdr = x->type->input(x, skb);
		if (nexthdr <= 0)
			goto drop_unlock;

		skb_network_header(skb)[nhoff] = nexthdr;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec[xfrm_nr++] = x;

		if (x->mode->input(x, skb))
			goto drop;

		if (x->props.mode == XFRM_MODE_TUNNEL) { /* XXX */
			decaps = 1;
			break;
		}

		if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
			goto drop;
	} while (!err);

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;
	skb->ip_summed = CHECKSUM_NONE;

	nf_reset(skb);

	if (decaps) {
		dst_release(skb->dst);
		skb->dst = NULL;
		netif_rx(skb);
		return -1;
	} else {
#ifdef CONFIG_NETFILTER
		skb->nh.ipv6h->payload_len = htons(skb->len);
		__skb_push(skb, skb->data - skb_network_header(skb));

		NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
			ip6_rcv_finish);
		return -1;
#else
		return 1;
#endif
	}

drop_unlock:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
drop:
	while (--xfrm_nr >= 0)
		xfrm_state_put(xfrm_vec[xfrm_nr]);

	kfree_skb(skb);
	return -1;
}
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto)
{
	struct xfrm_state *x = NULL;
	int wildcard = 0;
	struct in6_addr any;
	xfrm_address_t *xany;
	struct xfrm_state *xfrm_vec_one = NULL;
	int nh = 0;
	int i = 0;

	ipv6_addr_set(&any, 0, 0, 0, 0);
	xany = (xfrm_address_t *)&any;

	for (i = 0; i < 3; i++) {
		xfrm_address_t *dst, *src;

		switch (i) {
		case 0:
			dst = daddr;
			src = saddr;
			break;
		case 1:
			/* lookup state with wild-card source address */
			wildcard = 1;
			dst = daddr;
			src = xany;
			break;
		case 2:
		default:
			/* lookup state with wild-card addresses */
			wildcard = 1; /* XXX */
			dst = xany;
			src = xany;
			break;
		}

		x = xfrm_state_lookup_byaddr(dst, src, proto, AF_INET6);
		if (!x)
			continue;

		spin_lock(&x->lock);

		if (wildcard) {
			if ((x->props.flags & XFRM_STATE_WILDRECV) == 0) {
				spin_unlock(&x->lock);
				xfrm_state_put(x);
				x = NULL;
				continue;
			}
		}

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}
		if (xfrm_state_check_expire(x)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		nh = x->type->input(x, skb);
		if (nh <= 0) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		xfrm_vec_one = x;
		break;
	}

	if (!xfrm_vec_one)
		goto drop;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto drop;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (1 + skb->sp->len > XFRM_MAX_DEPTH)
		goto drop;

	skb->sp->xvec[skb->sp->len] = xfrm_vec_one;
	skb->sp->len++;

	return 1;

drop:
	if (xfrm_vec_one)
		xfrm_state_put(xfrm_vec_one);
	return -1;
}
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	u32 mark = skb->mark;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		family = x->outer_mode->afinfo->family;
		goto resume;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
		switch (family) {
		case AF_INET:
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
			break;
		case AF_INET6:
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
			break;
		}
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	int err;
	__be32 seq;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->props.replay_window && xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		XFRM_SKB_CB(skb)->seq.input = seq;

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (x->props.replay_window)
			xfrm_replay_advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		dst_release(skb->dst);
		skb->dst = NULL;
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
int __xfrm6_rcv_one(struct sk_buff *skb, xfrm_address_t *daddr,
		    xfrm_address_t *saddr, u8 proto)
{
	struct xfrm_state *x = NULL;
	int wildcard = 0;
	struct in6_addr any;
	xfrm_address_t *xany;
	struct xfrm_state *xfrm_vec_one = NULL;
	int nh = 0;
	int i = 0;

	ipv6_addr_set(&any, 0, 0, 0, 0);
	xany = (xfrm_address_t *)&any;

	for (i = 0; i < 3; i++) {
		xfrm_address_t *dst, *src;

		switch (i) {
		case 0:
			dst = daddr;
			src = saddr;
			break;
		case 1:
			/* lookup state with wild-card source address */
			wildcard = 1;
			dst = daddr;
			src = xany;
			break;
		case 2:
		default:
			/* lookup state with wild-card addresses */
			wildcard = 1; /* XXX */
			dst = xany;
			src = xany;
			break;
		}

		x = xfrm_state_lookup_byaddr(dst, src, proto, AF_INET6);
		if (!x)
			continue;

		spin_lock(&x->lock);

		if (wildcard) {
			if ((x->props.flags & XFRM_STATE_WILDRECV) == 0) {
				printk(KERN_INFO "%s: found state is not wild-card.\n",
				       __FUNCTION__);
				spin_unlock(&x->lock);
				xfrm_state_put(x);
				x = NULL;
				continue;
			}
		}

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}
		if (xfrm_state_check_expire(x)) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		nh = x->type->input(x, skb);
		if (nh <= 0) {
			spin_unlock(&x->lock);
			xfrm_state_put(x);
			x = NULL;
			continue;
		}

		break;
	}

	if (!x)
		goto error;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	x->curlft.use_time = (unsigned long) xtime.tv_sec;

	spin_unlock(&x->lock);

	xfrm_vec_one = x;

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			printk(KERN_INFO "%s: dup secpath failed\n", __FUNCTION__);
			goto error;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	if (1 + skb->sp->len > XFRM_MAX_DEPTH) {
		printk(KERN_INFO "%s: too many states\n", __FUNCTION__);
		goto error;
	}

	skb->sp->xvec[skb->sp->len] = xfrm_vec_one;
	skb->sp->len++;
	skb->ip_summed = CHECKSUM_NONE;

	return 0;

error:
	return -1;
}
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);
#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
	struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
	int xfrm_nr = 0;
	int i;
#endif

	if (err <= 0)
		goto resume;

	do {
#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
		if (x->offloaded) {
			if (xfrm_nr == XFRM_MAX_DEPTH) {
				err = -ENOBUFS;
				goto out_exit;
			}
			if (!x->curlft.use_time)
				x->curlft.use_time = get_seconds();
			xfrm_vec[xfrm_nr++] = x;
			skb->ipsec_offload = 1;
			goto next_dst;
		}
#endif
		err = xfrm_state_check_space(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);
		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);

		err = x->type->output(x, skb);
		if (err == -EINPROGRESS)
			goto out_exit;

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
next_dst:
#endif
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	err = 0;

#if defined(CONFIG_SYNO_COMCERTO) && (defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD))
	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp)
			goto error_nolock;
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}
	if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
		goto error_nolock;

	memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
	       xfrm_nr * sizeof(xfrm_vec[0]));
	skb->sp->len += xfrm_nr;

	for (i = 0; i < skb->sp->len; i++)
		xfrm_state_hold(skb->sp->xvec[i]);
#endif

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
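xfrm_output_one above applies one transform per loop iteration, popping the next dst from the chain and stopping once a tunnel-mode state has been applied, since a tunnel terminates the current stack of transforms (in the Comcerto variant, offloaded states skip the software steps and are instead recorded for the output secpath). A compact editorial model of that control flow follows; the names xform, output_chain and apply_stub are hypothetical, not kernel APIs.

#include <stdio.h>

struct xform {
	struct xform *next;			/* next transform in the bundle */
	int is_tunnel;				/* tunnel mode terminates this stack */
	int (*apply)(struct xform *x, void *pkt);
};

static int output_chain(struct xform *x, void *pkt)
{
	int err = 0;

	while (x) {
		err = x->apply(x, pkt);		/* the mode/type output step */
		if (err)
			break;			/* on error the caller frees pkt */
		if (x->is_tunnel)
			break;			/* packet is now encapsulated */
		x = x->next;			/* analogous to skb_dst_pop() */
	}
	return err;
}

static int apply_stub(struct xform *x, void *pkt)
{
	(void)x;
	(void)pkt;
	return 0;
}

int main(void)
{
	struct xform tunnel = { NULL, 1, apply_stub };
	struct xform transport = { &tunnel, 0, apply_stub };

	/* applies the transport transform, then stops after the tunnel */
	return output_chain(&transport, NULL);
}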