/** * Allocate a new skb that can hold data of length @len. * * An SKB is created complely headerless. The linear part of an SKB * is set apart for headers, and stream data is placed in paged fragments. * Lower layers will take care of prepending all required headers. */ struct sk_buff * ss_skb_alloc_pages(size_t len) { int i_frag, nr_frags = DIV_ROUND_UP(len, PAGE_SIZE); struct sk_buff *skb; BUG_ON(nr_frags > MAX_SKB_FRAGS); skb = ss_skb_alloc(); if (!skb) return NULL; for (i_frag = 0; i_frag < nr_frags; ++i_frag) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) { kfree_skb(skb); return NULL; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,12) /* See __skb_alloc_pages() in include/linux/skbuff.h. */ if (page->pfmemalloc) skb->pfmemalloc = true; #endif __skb_fill_page_desc(skb, i_frag, page, 0, 0); skb_shinfo(skb)->nr_frags++; } return skb; }
/**
 * Allocate a new skb able to hold @len bytes of data.
 *
 * The SKB is created completely headerless: the linear part is reserved
 * for headers, while stream data lives in paged fragments. Lower layers
 * prepend whatever headers are required.
 *
 * Similar to alloc_skb_with_frags() except it doesn't allocate multi-page
 * fragments, and it sets up fragments with zero size.
 *
 * Returns the new skb, or NULL on allocation failure.
 */
struct sk_buff *
ss_skb_alloc_pages(size_t len)
{
	struct sk_buff *skb;
	int frag_idx, n_frags = DIV_ROUND_UP(len, PAGE_SIZE);

	BUG_ON(n_frags > MAX_SKB_FRAGS);

	skb = ss_skb_alloc();
	if (!skb)
		return NULL;

	for (frag_idx = 0; frag_idx < n_frags; ++frag_idx) {
		struct page *pg = alloc_page(GFP_ATOMIC);

		if (!pg) {
			/* kfree_skb() releases the pages attached so far. */
			kfree_skb(skb);
			return NULL;
		}
		skb_fill_page_desc(skb, frag_idx, pg, 0, 0);
		TFW_DBG3("Created new frag %d,%p for skb %p\n",
			 frag_idx, page_address(pg), skb);
	}

	return skb;
}