Example #1
struct sk_buff *
__pfq_alloc_skb(unsigned int size, gfp_t priority, int fclone, int node)
{
#ifdef PFQ_USE_SKB_POOL
        struct pfq_percpu_pool *pool = this_cpu_ptr(percpu_pool);

        if (atomic_read(&pool->enable))
                return ____pfq_alloc_skb_pool(size, priority, fclone, node, &pool->rx_pool);
#endif
        return __alloc_skb(size, priority, fclone, node);
}
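
The pool fast path ____pfq_alloc_skb_pool() is not shown on this page. As a rough illustration of the technique, here is a minimal sketch, not PFQ's actual code: the pool layout, helper names, and PFQ_POOL_SIZE are assumptions.

/* Minimal sketch only -- not PFQ's actual implementation. No locking
 * is needed because the pool is per-CPU and the caller obtained it via
 * this_cpu_ptr(). A real pool must also reinitialize the recycled skb
 * before reuse, as Example #2 below demonstrates. */
#define PFQ_POOL_SIZE 1024		/* assumed capacity */

struct pfq_skb_pool {			/* assumed layout */
        struct sk_buff *slot[PFQ_POOL_SIZE];
        size_t top;
};

static struct sk_buff *
____pfq_alloc_skb_pool(unsigned int size, gfp_t priority, int fclone,
                       int node, struct pfq_skb_pool *pool)
{
        /* fast path: pop a preallocated skb if one is available */
        if (pool->top > 0)
                return pool->slot[--pool->top];

        /* slow path: fall back to the regular slab allocator */
        return __alloc_skb(size, priority, fclone, node);
}
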
Example #2
struct sk_buff *skb_cache_alloc(void *skb_cache_handle)
{
	struct sk_buff *skb;
	struct skb_shared_info *shinfo;
	skb_cache_t *skb_cache = skb_cache_handle;

	/* try first to get an skb from the cache */
	local_irq_disable();
	skb = skb_cache->skb_list;
	if (skb) {
		skb_cache->skb_list = skb->next;
		skb_cache->count--;
		local_irq_enable();
	} else {
		local_irq_enable();

		/* allocate a new skb if the cache was empty */
		skb = __alloc_skb(skb_cache->alloc_size, GFP_ATOMIC, 0);
		if (!skb)
			return NULL;

		/* arrange for this skb to be freed back to the cache */
		skb->retfreeq_cb = skb_cache_free;
		skb->retfreeq_context = skb_cache;
		skb->retfreeq_skb_prealloc = 1;
		skb->retfreeq_data_prealloc = 1;

		skb_cache->misses++;
		return skb;
	}

	/* recycle the cached skb: reinitialize it as a fresh allocation */
	memset(skb, 0, offsetof(struct sk_buff, truesize));
	atomic_set(&skb->users, 1);
	skb->data = skb->tail = skb->head;

	/* re-set the cache-return info, since the memset above cleared it */
	skb->retfreeq_cb = skb_cache_free;
	skb->retfreeq_context = skb_cache;
	skb->retfreeq_skb_prealloc = 1;
	skb->retfreeq_data_prealloc = 1;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->tso_size = 0;
	shinfo->tso_segs = 0;
	shinfo->ufo_size = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	skb_cache->hits++;
	return skb;
}
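
The free side, skb_cache_free(), is registered as the retfreeq_cb callback above but is not shown. A sketch of what it plausibly looks like, mirroring the alloc path; the callback signature is an assumption, and only the fields already used by skb_cache_alloc() are relied on.

/* Hedged sketch of the matching free path: push the skb back onto the
 * cache's singly linked list (chained through skb->next), with IRQs
 * disabled to match skb_cache_alloc(). A production version would also
 * cap skb_cache->count and kfree_skb() any overflow. */
void skb_cache_free(void *skb_cache_handle, struct sk_buff *skb)
{
	skb_cache_t *skb_cache = skb_cache_handle;

	local_irq_disable();
	skb->next = skb_cache->skb_list;
	skb_cache->skb_list = skb;
	skb_cache->count++;
	local_irq_enable();
}
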
Example #3
File: skbuff.c Project: foxwolf/yjd
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
	gfp_t gfp_mask)
{
	struct sk_buff *skb;

	/* this fork's __alloc_skb() takes the caller's name as an extra
	 * first argument, presumably for allocation tracking; otherwise it
	 * matches the mainline signature used in the next example */
	skb = __alloc_skb(__FUNCTION__, length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
Example #4
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	/* -1 is NUMA_NO_NODE: no preferred node */
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
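
For context, this is how a driver's receive path typically consumes __netdev_alloc_skb(); a sketch, with rx_buf and len standing in for device-specific details.

/* Sketch of a typical caller in a driver rx path: allocate (headroom
 * is handled inside __netdev_alloc_skb), append the received frame
 * with skb_put(), and hand it to the stack. */
static void example_rx_frame(struct net_device *dev, const void *rx_buf,
			     unsigned int len)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), rx_buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
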
Example #5
struct sk_buff *
__pfq_alloc_skb(unsigned int size, gfp_t priority, int fclone, int node)
{
#ifdef PFQ_USE_SKB_RECYCLE
        struct local_data *this_cpu = __this_cpu_ptr(cpu_data);

        if (atomic_read(&this_cpu->enable_recycle))
                return ____pfq_alloc_skb_recycle(size, priority, fclone, node,
                                                 &this_cpu->rx_recycle_list);
#endif
        return __alloc_skb(size, priority, fclone, node);
}
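
The atomic enable_recycle flag is what lets the fast path above be toggled at runtime without taking a lock. Below is a hypothetical control-path sketch; the function is not from PFQ, and a real implementation must additionally synchronize with CPUs that may still be inside the fast path before touching the lists.

/* Hypothetical sketch: clear the per-CPU enable flags so that
 * __pfq_alloc_skb() falls through to __alloc_skb(). Once every CPU is
 * known to have left the fast path, the rx_recycle_list entries can be
 * purged with kfree_skb(). */
static void pfq_recycle_disable(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct local_data *data = per_cpu_ptr(cpu_data, cpu);
                atomic_set(&data->enable_recycle, 0);
        }
}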