static struct frag_queue *ip6_frag_intern(unsigned int hash,
					  struct frag_queue *fq_in)
{
	struct frag_queue *fq;

	write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
	/* On SMP the same queue may have been created on another CPU;
	 * recheck the hash chain and reuse the existing entry if so. */
	for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
		if (fq->id == fq_in->id &&
		    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
		    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			write_unlock(&ip6_frag_lock);
			fq_in->last_in |= COMPLETE;
			fq_put(fq_in);
			return fq;
		}
	}
#endif
	fq = fq_in;

	/* One reference for the pending timer, one for the hash table. */
	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
		atomic_inc(&fq->refcnt);

	atomic_inc(&fq->refcnt);
	if ((fq->next = ip6_frag_hash[hash]) != NULL)
		fq->next->pprev = &fq->next;
	ip6_frag_hash[hash] = fq;
	fq->pprev = &ip6_frag_hash[hash];
	ip6_frag_nqueues++;
	write_unlock(&ip6_frag_lock);
	return fq;
}
static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
						    struct nf_ct_frag6_queue *fq_in)
{
	struct nf_ct_frag6_queue *fq;

	write_lock(&nf_ct_frag6_lock);
#ifdef CONFIG_SMP
	/* Recheck the chain: another CPU may have inserted an equivalent
	 * queue while fq_in was being set up. */
	for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
		if (fq->id == fq_in->id &&
		    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
		    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->refcnt);
			write_unlock(&nf_ct_frag6_lock);
			fq_in->last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout))
		atomic_inc(&fq->refcnt);

	atomic_inc(&fq->refcnt);
	if ((fq->next = nf_ct_frag6_hash[hash]) != NULL)
		fq->next->pprev = &fq->next;
	nf_ct_frag6_hash[hash] = fq;
	fq->pprev = &nf_ct_frag6_hash[hash];
	INIT_LIST_HEAD(&fq->lru_list);
	list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
	nf_ct_frag6_nqueues++;
	write_unlock(&nf_ct_frag6_lock);
	return fq;
}
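Both intern functions above link the new queue into its hash bucket with the next/pprev idiom: pprev stores the address of whichever pointer currently points at the node (the bucket head or a predecessor's next field), so a node can be unlinked in O(1) without knowing its bucket. A minimal userspace sketch of the idiom; the node/chain_* names are illustrative, not kernel APIs:

	#include <stddef.h>

	struct node {
		struct node *next;	/* next element in the bucket chain */
		struct node **pprev;	/* address of the pointer pointing at us */
	};

	/* Push n at the head of the bucket, patching the old head's
	 * back-pointer so it can still be unlinked in O(1). */
	static void chain_insert(struct node **bucket, struct node *n)
	{
		if ((n->next = *bucket) != NULL)
			n->next->pprev = &n->next;
		*bucket = n;
		n->pprev = bucket;
	}

	/* Unlink n without knowing its bucket: *n->pprev is whichever
	 * pointer currently points at n. */
	static void chain_unlink(struct node *n)
	{
		if (n->next)
			n->next->pprev = n->pprev;
		*n->pprev = n->next;
	}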
static void nf_ct_frag6_evictor(void)
{
	struct nf_ct_frag6_queue *fq;
	struct list_head *tmp;
	unsigned int work;

	work = atomic_read(&nf_ct_frag6_mem);
	if (work <= nf_ct_frag6_low_thresh)
		return;

	work -= nf_ct_frag6_low_thresh;
	while (work > 0) {
		read_lock(&nf_ct_frag6_lock);
		if (list_empty(&nf_ct_frag6_lru_list)) {
			read_unlock(&nf_ct_frag6_lock);
			return;
		}
		tmp = nf_ct_frag6_lru_list.next;
		BUG_ON(tmp == NULL);
		fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list);
		atomic_inc(&fq->refcnt);
		read_unlock(&nf_ct_frag6_lock);

		spin_lock(&fq->lock);
		if (!(fq->last_in & COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->lock);

		fq_put(fq, &work);
	}
}
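The evictor walks the LRU list oldest-first, pins each victim with a reference before dropping the global lock, and relies on fq_put() to subtract the freed bytes from work. A self-contained sketch of that overall shape, reduced to a single-threaded cache with a low-water mark (all names here are illustrative):

	#include <stddef.h>

	struct entry {
		struct entry *next;	/* LRU order, oldest at the head */
		size_t size;		/* bytes charged to the cache */
	};

	struct cache {
		struct entry *lru_oldest;
		size_t mem;		/* current usage */
		size_t low_thresh;	/* evict down to this mark */
	};

	/* Drop oldest entries until usage falls below the low-water mark,
	 * mirroring the "work = mem - low_thresh" accounting above. */
	static void cache_evict(struct cache *c, void (*drop)(struct entry *))
	{
		while (c->mem > c->low_thresh && c->lru_oldest) {
			struct entry *victim = c->lru_oldest;

			c->lru_oldest = victim->next;
			c->mem -= victim->size;
			drop(victim);
		}
	}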
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq = (struct frag_queue *) data;

	spin_lock(&fq->lock);

	if (fq->last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	IP6_INC_STATS_BH(Ip6ReasmTimeout);
	IP6_INC_STATS_BH(Ip6ReasmFails);

	/* Send error only if the first segment arrived. */
	if (fq->last_in & FIRST_IN && fq->fragments) {
		struct net_device *dev = dev_get_by_index(fq->iif);

		/* But use as source the device on which the LAST segment
		   arrived, and do not use the fq->dev pointer directly:
		   the device might have disappeared already. */
		if (dev) {
			fq->fragments->dev = dev;
			icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_FRAGTIME, 0, dev);
			dev_put(dev);
		}
	}
out:
	spin_unlock(&fq->lock);
	fq_put(fq);
}
static void ip6_evictor(void)
{
	int i, progress;

	do {
		if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh)
			return;
		progress = 0;
		for (i = 0; i < IP6Q_HASHSZ; i++) {
			struct frag_queue *fq;

			if (ip6_frag_hash[i] == NULL)
				continue;

			read_lock(&ip6_frag_lock);
			if ((fq = ip6_frag_hash[i]) != NULL) {
				/* Find the oldest queue in this hash bucket. */
				while (fq->next)
					fq = fq->next;
				atomic_inc(&fq->refcnt);
				read_unlock(&ip6_frag_lock);

				spin_lock(&fq->lock);
				if (!(fq->last_in & COMPLETE))
					fq_kill(fq);
				spin_unlock(&fq->lock);

				fq_put(fq);
				IP6_INC_STATS_BH(Ip6ReasmFails);
				progress = 1;
				continue;
			}
			read_unlock(&ip6_frag_lock);
		}
	} while (progress);
}
static void ip6_evictor(void)
{
	struct frag_queue *fq;
	struct list_head *tmp;

	for (;;) {
		if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh)
			return;
		read_lock(&ip6_frag_lock);
		if (list_empty(&ip6_frag_lru_list)) {
			read_unlock(&ip6_frag_lock);
			return;
		}
		tmp = ip6_frag_lru_list.next;
		fq = list_entry(tmp, struct frag_queue, lru_list);
		atomic_inc(&fq->refcnt);
		read_unlock(&ip6_frag_lock);

		spin_lock(&fq->lock);
		if (!(fq->last_in & COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->lock);

		fq_put(fq);
		IP6_INC_STATS_BH(Ip6ReasmFails);
	}
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
		spin_unlock(&fq->q.lock);

		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
#if 1 // beney.kim
		printk(KERN_INFO "Fragmentation Header exist, but not fragmented IPv6 packet, need to ignore Checksum");
#endif
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
		spin_unlock(&fq->q.lock);

		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
{
	struct sk_buff *skb = *skbp;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;

	hdr = skb->nh.ipv6h;

	IP6_INC_STATS_BH(Ip6ReasmReqds);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
		return -1;
	}
	if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + sizeof(struct frag_hdr))) {
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
		return -1;
	}

	hdr = skb->nh.ipv6h;
	fhdr = (struct frag_hdr *)skb->h.raw;

	if (!(fhdr->frag_off & __constant_htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->h.raw += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(Ip6ReasmOKs);
		return (u8 *)fhdr - skb->nh.raw;
	}

	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
		ip6_evictor();

	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
		int ret = -1;

		spin_lock(&fq->lock);

		ip6_frag_queue(fq, skb, fhdr, nhoff);

		if (fq->last_in == (FIRST_IN | LAST_IN) &&
		    fq->meat == fq->len)
			ret = ip6_frag_reasm(fq, skbp, dev);

		spin_unlock(&fq->lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(Ip6ReasmFails);
	kfree_skb(skb);
	return -1;
}
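All three receive paths above short-circuit on fhdr->frag_off & htons(0xFFF9). The 16-bit frag_off field packs a 13-bit fragment offset in 8-octet units (mask 0xFFF8), two reserved bits, and the M ("more fragments") flag in bit 0 (mask 0x0001); 0xFFF9 is the union of the offset bits and M, so a zero result means offset 0 with no fragments to follow, i.e. a packet that carries a Fragment header without actually being fragmented. A standalone illustration of the test:

	#include <stdint.h>
	#include <arpa/inet.h>	/* htons, ntohs */

	/* frag_off arrives in network byte order, exactly as the
	 * kernel code above sees it. */
	static int is_unfragmented(uint16_t frag_off_net)
	{
		return (frag_off_net & htons(0xFFF9)) == 0;
	}

	static unsigned int frag_offset_bytes(uint16_t frag_off_net)
	{
		/* The offset field sits three bits up and counts 8-byte
		 * units, so the masked value is already the byte offset. */
		return ntohs(frag_off_net) & 0xFFF8;
	}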
static void nf_ct_frag6_expire(unsigned long data)
{
	struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;

	spin_lock(&fq->lock);

	if (fq->last_in & COMPLETE)
		goto out;

	fq_kill(fq);

out:
	spin_unlock(&fq->lock);
	fq_put(fq, NULL);
}
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	dev = dev_get_by_index(net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	/* But use as source the device on which the LAST segment arrived,
	   and do not use the fq->dev pointer directly: the device might
	   have disappeared already. */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	dev = dev_get_by_index(net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out;

	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}
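The two expire handlers above recover the enclosing frag_queue from the embedded inet_frag_queue with container_of, which subtracts the member's offset within the outer type from the member pointer. A self-contained sketch of the mechanism; the macro is a simplified form of the kernel's (which adds a type check), and the *_like types are illustrative:

	#include <stddef.h>	/* offsetof */

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inet_frag_queue_like { int dummy; };

	struct frag_queue_like {
		int iif;
		struct inet_frag_queue_like q;	/* embedded member */
	};

	/* Given a pointer to the embedded q, recover the whole struct. */
	static struct frag_queue_like *fq_of(struct inet_frag_queue_like *p)
	{
		return container_of(p, struct frag_queue_like, q);
	}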
struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
{
	struct sk_buff *clone;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct nf_ct_frag6_queue *fq;
	struct ipv6hdr *hdr;
	int fhoff, nhoff;
	u8 prevhdr;
	struct sk_buff *ret_skb = NULL;

	/* Jumbo payload inhibits frag. header */
	if (skb->nh.ipv6h->payload_len == 0) {
		DEBUGP("payload len = 0\n");
		return skb;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return skb;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (clone == NULL) {
		DEBUGP("Can't clone skb\n");
		return skb;
	}

	NFCT_FRAG6_CB(clone)->orig = skb;

	if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
		DEBUGP("message is too short.\n");
		goto ret_orig;
	}

	clone->h.raw = clone->data + fhoff;
	hdr = clone->nh.ipv6h;
	fhdr = (struct frag_hdr *)clone->h.raw;

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		DEBUGP("Invalid fragment offset\n");
		/* It is not a fragmented frame */
		goto ret_orig;
	}

	if (atomic_read(&nf_ct_frag6_mem) > nf_ct_frag6_high_thresh)
		nf_ct_frag6_evictor();

	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq == NULL) {
		DEBUGP("Can't find and can't create new queue\n");
		goto ret_orig;
	}

	spin_lock(&fq->lock);

	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
		spin_unlock(&fq->lock);
		DEBUGP("Can't insert skb to queue\n");
		fq_put(fq, NULL);
		goto ret_orig;
	}

	if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len) {
		ret_skb = nf_ct_frag6_reasm(fq, dev);
		if (ret_skb == NULL)
			DEBUGP("Can't reassemble fragmented packets\n");
	}
	spin_unlock(&fq->lock);

	fq_put(fq, NULL);
	return ret_skb;

ret_orig:
	kfree_skb(clone);
	return skb;
}
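nf_ct_frag6_gather works on a clone so the untouched original can be handed back whenever reassembly is not possible, stashing the original in the clone's per-skb control block. A sketch of that idiom; frag6_cb_like and struct pkt are cut-down stand-ins for the kernel's nf_ct_frag6_skb_cb and sk_buff (the real control block carries additional fields):

	struct sk_buff;			/* opaque for this sketch */

	/* sk_buff reserves a cb[] scratch array that each protocol layer
	 * may overlay with its own private struct. */
	struct pkt {
		char cb[48];		/* stand-in for sk_buff's cb[] */
	};

	struct frag6_cb_like {
		struct sk_buff *orig;	/* the uncloned packet to fall back to */
	};

	#define FRAG6_CB(p) ((struct frag6_cb_like *)((p)->cb))

	static void remember_orig(struct pkt *clone, struct sk_buff *orig)
	{
		FRAG6_CB(clone)->orig = orig;	/* as NFCT_FRAG6_CB(clone)->orig above */
	}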