/*
 * Free an sk_buff (1.x-era).  rw selects which side of the owning
 * socket's accounting the buffer was charged to: read (rw != 0) or
 * write.  An skb that is currently locked is only marked for freeing
 * (free = 3) and actually released when it is unlocked.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL) {
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock) {
		skb->free = 3;	/* Free when unlocked */
		net_free_locked++;
		return;
	}
	/* Diagnostics only: both conditions indicate caller bugs, but the
	 * free still proceeds. */
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->next)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));
	if (skb->sk) {
		if (skb->sk->prot != NULL) {
			/* Protocol-specific accounting hooks; they also free
			 * the skb memory themselves. */
			if (rw)
				skb->sk->prot->rfree(skb->sk, skb, skb->mem_len);
			else
				skb->sk->prot->wfree(skb->sk, skb, skb->mem_len);
		} else {
			unsigned long flags;
			/* Non INET - default wmalloc/rmalloc handler */
			/* Adjust the socket's memory counters with interrupts
			 * off, then wake any writer before freeing. */
			save_flags(flags);
			cli();
			if (rw)
				skb->sk->rmem_alloc -= skb->mem_len;
			else
				skb->sk->wmem_alloc -= skb->mem_len;
			restore_flags(flags);
			if (!skb->sk->dead)
				skb->sk->write_space(skb->sk);
			kfree_skbmem(skb, skb->mem_len);
		}
	} else
		kfree_skbmem(skb, skb->mem_len);
}
void kfree_skb(struct sk_buff *skb, int rw) { if (skb == NULL) { printk(KERN_CRIT "kfree_skb: skb = NULL (from %p)\n", __builtin_return_address(0)); return; } #if CONFIG_SKB_CHECK IS_SKB(skb); #endif if (skb->lock) { skb->free = 3; /* Free when unlocked */ net_free_locked++; return; } if (skb->free == 2) printk(KERN_WARNING "Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n", __builtin_return_address(0)); if (skb->list) printk(KERN_WARNING "Warning: kfree_skb passed an skb still on a list (from %p).\n", __builtin_return_address(0)); if(skb->destructor) skb->destructor(skb); if (skb->sk) { struct sock * sk = skb->sk; if(sk->prot!=NULL) { if (rw) sock_rfree(sk, skb); else sock_wfree(sk, skb); } else { if (rw) atomic_sub(skb->truesize, &sk->rmem_alloc); else { if(!sk->dead) sk->write_space(sk); atomic_sub(skb->truesize, &sk->wmem_alloc); } kfree_skbmem(skb); } } else kfree_skbmem(skb); }
/*
 * Final free of an skb (2.6-era path).  The skb must already be
 * unlinked from any queue; if not, warn and BUG().  Releases the dst
 * cache entry, the XFRM security path, the destructor callback,
 * netfilter conntrack/bridge state, resets traffic-control fields and
 * returns the skb memory to its pool.
 */
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		/* Destructors may touch socket state; running one from hard
		 * IRQ context is suspect, so warn — but still call it. */
		if (in_irq())
			printk(KERN_WARNING "Warning: kfree_skb on "
			       "hard IRQ %p\n", NET_CALLER(skb));
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
	/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif
	kfree_skbmem(skb);
}
void skb_cache_end(void *skb_cache_handle) { skb_cache_t *skb_cache = skb_cache_handle; skb_cache_t **p; struct sk_buff *skb, *next; if (!skb_cache) return; del_timer(&skb_cache->timer); skb_cache->timer.expires = 0; /* take off global list */ local_irq_disable(); for (p = &skb_cache_list; *p; p = &(*p)->next) { if (*p == skb_cache) { *p = skb_cache->next; break; } } local_irq_enable(); /* free the skb in the list */ for (skb = skb_cache->skb_list; skb; skb = next) { next = skb->next; skb->retfreeq_cb = NULL; kfree_skbmem(skb); } kfree(skb_cache); }
/*
 * Periodic cache trimmer.  While the cache holds more than
 * SKB_LOWER_LIMIT buffers, it releases at most one buffer per tick,
 * then always re-arms itself for another SKB_CACHE_TIMEOUT.
 */
static void skb_cache_timer_cb(unsigned long arg)
{
	skb_cache_t *skb_cache = (skb_cache_t *)arg;
	struct sk_buff *skb;

	if (skb_cache->count <= SKB_LOWER_LIMIT)
		goto Exit;

	/* Pop one skb off the cache list with interrupts disabled. */
	local_irq_disable();
	skb = skb_cache->skb_list;
	if (skb)
		skb_cache->skb_list = skb->next;
	local_irq_enable();

	if (skb) {
		skb_cache->count--;
		skb->retfreeq_cb = NULL;	/* detach from cache before freeing */
		kfree_skbmem(skb);
		skb_cache->free_by_timer++;	/* statistics only */
	}

Exit:
	/* re-set timer */
	/* NOTE(review): calling init_timer() on every re-arm looks heavy;
	 * .data/.function survive from the first arm (see comment below),
	 * so mod_timer() would presumably suffice — confirm against the
	 * timer API of this kernel version. */
	init_timer(&skb_cache->timer);
	skb_cache->timer.expires = SKB_CACHE_TIMEOUT + jiffies;
	/* following info stays the same
	skb_cache->timer.data = (unsigned long) skb_cache;
	skb_cache->timer.function = skb_cache_timer_cb;
	*/
	add_timer(&skb_cache->timer);
}
static void skb_cache_free(void *context, void *obj, int flag) { struct sk_buff *skb = obj; skb_cache_t *skb_cache = context; if (FREE_SKB != flag) return; /* the cache saves the skb struct together with its data buffer, * if the data buffer is used by other skb, we can not store this skb */ if (atomic_read(&skb_shinfo(skb)->dataref) > 1) { skb_cache->free_invalid++; skb->retfreeq_cb = NULL; kfree_skbmem(skb); return; } local_irq_disable(); skb->next = skb_cache->skb_list; skb_cache->skb_list = skb; skb_cache->count++; local_irq_enable(); }
/*
 * Release every resource attached to an skb and free it.  This variant
 * also drops the conntrack reassembly reference (nfct_reasm).
 */
void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);	/* drop routing cache reference */
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);	/* drop IPsec security path */
#endif
	if (skb->destructor) {
		/* Destructors may touch socket state; hard IRQ is suspect. */
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
	/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
	kfree_skbmem(skb);
}
/*
 * Free an skb whose reference count has dropped to zero.  The caller
 * must have removed it from any queue first (BUG otherwise).  Drops
 * the dst entry, XFRM path, destructor and netfilter state, resets
 * traffic-control fields, then frees the skb memory.
 */
void __kfree_skb(struct sk_buff *skb)
{
	BUG_ON(skb->list != NULL);

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());	/* destructors may touch socket state */
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
	/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif
	kfree_skbmem(skb);
}
/*
 * Release a receive buffer and credit its size back to the owning
 * socket's receive allocation.  The size is sampled before the free,
 * since skb is invalid after kfree_skbmem().
 */
void sock_rfree(struct sock *sk, struct sk_buff *skb)
{
	int s = skb->truesize;	/* read before the skb memory goes away */

#ifdef CONFIG_SKB_CHECK	/* fixed: "#if" breaks if the option is defined empty */
	IS_SKB(skb);
#endif
	kfree_skbmem(skb);
	if (sk) {
		atomic_sub(s, &sk->rmem_alloc);
	}
}
/*
 * Free an skb that is no longer referenced: warn if it is still queued
 * (caller bug, but the free proceeds), drop the route cache entry, run
 * the destructor, reinitialize the header state, and release the
 * memory.
 */
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list)
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", __builtin_return_address(0));

	dst_release(skb->dst);
	if (skb->destructor)
		skb->destructor(skb);
	skb_headerinit(skb, NULL, 0);	/* clean state */
	kfree_skbmem(skb);
}
/*
 * Core skb release path: warn if the buffer is still queued on a list,
 * run the destructor callback when one is attached, then hand the skb
 * memory back.
 */
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list)
		wprintf("Warning: kfree_skb passed an skb still "
			"on a list.\n");

	if (skb->destructor)
		skb->destructor(skb);

	kfree_skbmem(skb);
}
/*
 * Release a transmit buffer and credit its size back to the owning
 * socket's send allocation; wake the socket in case a writer is
 * blocked waiting for buffer space.  The size is sampled before the
 * free, since skb is invalid after kfree_skbmem().
 */
void sock_wfree(struct sock *sk, struct sk_buff *skb)
{
	int s = skb->truesize;	/* read before the skb memory goes away */

#ifdef CONFIG_SKB_CHECK	/* fixed: "#if" breaks if the option is defined empty */
	IS_SKB(skb);
#endif
	kfree_skbmem(skb);
	if (sk) {
		/* In case it might be waiting for more memory. */
		sk->write_space(sk);
		atomic_sub(s, &sk->wmem_alloc);
	}
}
/* We have a good packet. Well, not really "good", just mostly not broken. We must check everything to see if it is good. */ static void el_receive(struct device *dev) { int sksize, pkt_len; struct sk_buff *skb; pkt_len = inw(RX_LOW); if (el_debug > 4) printk(" el_receive %d.\n", pkt_len); if ((pkt_len < 60) || (pkt_len > 1536)) { if (el_debug) printk("%s: bogus packet, length=%d\n", dev->name, pkt_len); el_status.stats.rx_over_errors++; return; } outb(AX_SYS, AX_CMD); sksize = sizeof(struct sk_buff) + pkt_len; skb = alloc_skb(sksize, GFP_ATOMIC); outw(0x00, GP_LOW); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); el_status.stats.rx_dropped++; return; } else { skb->mem_len = sksize; skb->mem_addr = skb; skb->len = pkt_len; skb->dev = dev; insb(DATAPORT, skb->data, pkt_len); #ifdef HAVE_NETIF_RX netif_rx(skb); #else skb->lock = 0; if (dev_rint((unsigned char*)skb, pkt_len, IN_SKBUFF, dev) != 0) { kfree_skbmem(skb, sksize); lp->stats.rx_dropped++; break; } #endif el_status.stats.rx_packets++; } return; }
/*
 * Final free of an skb (2.4-era path): BUG if it is still linked on a
 * list, drop the dst reference, run the destructor (warning when
 * invoked from hard IRQ context), release conntrack state, reset the
 * header fields, then free the skb memory.
 */
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
	if (skb->destructor) {
		if (in_irq()) {
			/* Destructors may touch socket state; hard IRQ is suspect. */
			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
			       NET_CALLER(skb));
		}
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#endif
	skb_headerinit(skb, NULL, 0);	/* clean state */
	kfree_skbmem(skb);
}
/*
 * Free an sk_buff: release every attached resource, then the skb
 * structure itself.
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);