// may return NULL, caller should check
struct sk_buff *ccci_alloc_skb(int size, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

	if (size > SKB_4K || size < 0)
		goto err_exit;

	/* blocking callers draw from the pool only; non-blocking callers
	 * try the kernel allocator first and fall back to the pool */
	skb = blocking ? NULL : __alloc_skb_from_kernel(size);
	if (!skb) {
slow_retry:
		skb = __alloc_skb_from_pool(size);
	}

	if (unlikely(!skb)) {
		if (blocking) {
			/* sleep and retry the pool until it is refilled */
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n",
				size, count++);
			msleep(100);
			goto slow_retry;
		} else {
fast_retry:
			/* retry the kernel allocator up to 20 times */
			skb = __alloc_skb_from_kernel(size);
			if (!skb && count++ < 20)
				goto fast_retry;
		}
	}

err_exit:
	if (unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n",
			__builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n",
			__builtin_return_address(0), skb, size);
	return skb;
}
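/*
 * Caller-side sketch (illustrative, not from this source): with this
 * two-argument variant a blocking request never touches the kernel
 * allocator, so it may msleep() and must only be used in process
 * context. The function name demo_blocking_get is hypothetical.
 */
static struct sk_buff *demo_blocking_get(void)
{
	/* blocking = 1: loop on the pool until a refill work item
	 * (such as __16_reload_work below) makes a buffer available */
	return ccci_alloc_skb(SKB_4K, 1);
}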
void ccci_skb_enqueue(struct ccci_skb_queue *queue, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->skb_list.lock, flags);
	if (queue->skb_list.qlen < queue->max_len) {
		__skb_queue_tail(&queue->skb_list, newsk);
		if (queue->skb_list.qlen > queue->max_history)
			queue->max_history = queue->skb_list.qlen;
	} else {
#if 0
		if (queue->pre_filled) {
			CCCI_ERR_MSG(0, BM, "skb queue too long, max=%d\n",
				queue->max_len);
#else
		if (1) {
#endif
#ifdef CCCI_MEM_BM_DEBUG
			if (ccci_skb_addr_checker(newsk)) {
				CCCI_INF_MSG(-1, BM,
					"ccci_skb_enqueue:ccci_skb_addr_checker failed!\n");
				ccci_mem_dump(-1, queue,
					sizeof(struct ccci_skb_queue));
				dump_stack();
			}
#endif
			/* queue is full: drop the skb instead of queueing it */
			dev_kfree_skb_any(newsk);
		} else {
			__skb_queue_tail(&queue->skb_list, newsk);
		}
	}
	spin_unlock_irqrestore(&queue->skb_list.lock, flags);
}

void ccci_skb_queue_init(struct ccci_skb_queue *queue, unsigned int skb_size,
	unsigned int max_len, char fill_now)
{
	int i;

	queue->magic_header = SKB_MAGIC_HEADER;
	queue->magic_footer = SKB_MAGIC_FOOTER;
#ifdef CCCI_WP_DEBUG
	if (((unsigned long)queue) == ((unsigned long)(&skb_pool_16))) {
		CCCI_INF_MSG(-1, BM,
			"ccci_skb_queue_init: add hwp skb_pool_16.magic_footer=%p!\n",
			&queue->magic_footer);
		enable_watchpoint(&queue->magic_footer);
	}
#endif
	skb_queue_head_init(&queue->skb_list);
	queue->max_len = max_len;
	if (fill_now) {
		/* pre-fill the queue so it can serve as an skb pool */
		for (i = 0; i < queue->max_len; i++) {
			struct sk_buff *skb =
				__alloc_skb_from_kernel(skb_size, GFP_KERNEL);

			if (skb != NULL)
				skb_queue_tail(&queue->skb_list, skb);
		}
		queue->pre_filled = 1;
	} else {
		queue->pre_filled = 0;
	}
	queue->max_history = 0;
}
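/*
 * Usage sketch for the queue API above (illustrative only; demo_pool
 * and the pool depth of 16 are hypothetical): create a pre-filled pool
 * at init time, then recycle finished buffers back into it. A recycled
 * skb is silently freed when the queue is already at max_len.
 */
static struct ccci_skb_queue demo_pool;

static void demo_pool_init(void)
{
	/* fill_now = 1 pre-allocates 16 skbs of SKB_4K bytes each */
	ccci_skb_queue_init(&demo_pool, SKB_4K, 16, 1);
}

static void demo_pool_recycle(struct sk_buff *skb)
{
	ccci_skb_enqueue(&demo_pool, skb);
}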
static void __16_reload_work(struct work_struct *work)
{
	struct sk_buff *skb;

	CCCI_DBG_MSG(-1, BM, "refill 16B skb pool\n");
	/* top the 16-byte pool back up to its nominal depth */
	while (skb_pool_16.skb_list.qlen < SKB_POOL_SIZE_16) {
		skb = __alloc_skb_from_kernel(SKB_16, GFP_KERNEL);
		if (skb)
			skb_queue_tail(&skb_pool_16.skb_list, skb);
		else
			CCCI_ERR_MSG(-1, BM, "fail to reload 16B pool\n");
	}
}
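/*
 * Hedged sketch of how a refill handler like this is typically wired
 * up (the work item and function names below are hypothetical, not
 * taken from this source): bind a work_struct to __16_reload_work at
 * init, then kick it from atomic context whenever the pool runs low,
 * deferring the GFP_KERNEL allocations to process context.
 */
static struct work_struct demo_reload_work;

static void demo_bm_init(void)
{
	INIT_WORK(&demo_reload_work, __16_reload_work);
}

static void demo_on_pool_low(void)
{
	schedule_work(&demo_reload_work);
}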
/* may return NULL, caller should check; network should always use
 * blocking as we do not want it to consume our own pool */
struct sk_buff *ccci_alloc_skb(int size, char from_pool, char blocking)
{
	int count = 0;
	struct sk_buff *skb = NULL;

#ifdef CCCI_MEM_BM_DEBUG
	ccci_magic_checker();
#endif
	if (size > SKB_4K || size < 0)
		goto err_exit;

	if (from_pool) {
slow_retry:
		skb = __alloc_skb_from_pool(size);
		if (unlikely(!skb && blocking)) {
			/* pool empty: sleep and retry until it is refilled */
			CCCI_INF_MSG(-1, BM, "skb pool is empty! size=%d (%d)\n",
				size, count++);
			msleep(100);
			goto slow_retry;
		}
	} else {
		if (blocking) {
			skb = __alloc_skb_from_kernel(size, GFP_KERNEL);
		} else {
fast_retry:
			/* atomic context: retry up to 20 times without sleeping */
			skb = __alloc_skb_from_kernel(size, GFP_ATOMIC);
			if (!skb && count++ < 20)
				goto fast_retry;
		}
	}

err_exit:
	if (unlikely(!skb))
		CCCI_ERR_MSG(-1, BM, "%ps alloc skb fail, size=%d\n",
			__builtin_return_address(0), size);
	else
		CCCI_DBG_MSG(-1, BM, "%ps alloc skb %p, size=%d\n",
			__builtin_return_address(0), skb, size);
	return skb;
}
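/*
 * Caller-side sketch for the three-argument variant (illustrative; the
 * function name and error handling are hypothetical): per the comment
 * above, a network path passes from_pool = 0 with blocking = 1 so it
 * never drains the driver's own pool, and every caller must handle a
 * NULL return.
 */
static int demo_net_alloc(int size)
{
	/* from_pool = 0, blocking = 1: plain GFP_KERNEL allocation that
	 * leaves the pre-filled pools untouched */
	struct sk_buff *skb = ccci_alloc_skb(size, 0, 1);

	if (!skb)
		return -ENOMEM; /* may still fail; caller must check */
	/* ... fill the skb and hand it off ... */
	dev_kfree_skb_any(skb);
	return 0;
}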