/**
 * Check whether the page holding @frag has room for @len more bytes,
 * accounting for other fragments of @skb placed on the same page.
 * The scan stops as soon as every page reference is matched by one of
 * @skb's own fragments; if references remain after the scan, the page
 * is also used by an outside party and must not be extended.
 * @return the fragment from the page with the least tail room (i.e.
 * the last one placed there), or NULL if the page is unsuitable.
 */
static skb_frag_t *
__check_frag_room(struct sk_buff *skb, skb_frag_t *frag, int len)
{
	struct page *pg = skb_frag_page(frag);
	skb_frag_t *best = frag;
	int n, room, min_room, users;

	users = page_count(pg);
	if (users == 1)
		return frag; /* we are the only user of the page */

	min_room = PAGE_SIZE - ss_skb_frag_len(frag);
	for (n = skb_shinfo(skb)->nr_frags - 1; n >= 0; --n) {
		skb_frag_t *other = &skb_shinfo(skb)->frags[n];

		if (other == frag || skb_frag_page(other) != pg)
			continue;

		room = PAGE_SIZE - ss_skb_frag_len(other);
		if (room < len)
			return NULL;
		if (room < min_room) {
			min_room = room;
			best = other;
		}
		/* Return locally referenced pages only. */
		if (--users == 1)
			return best;
	}
	/* The page is used somewhere else. */
	return NULL;
}
/*
 * Make room for @shift fragments starting with slot @i, then prepare a
 * new fragment in slot @i that can hold @size bytes. Free tail room is
 * first looked up among existing page fragments; a new page is
 * allocated only when none has enough space.
 *
 * NOTE(review): this definition appears truncated in this view — the
 * final fragment setup and return statement are not visible here.
 */
static int
__new_pgfrag(struct sk_buff *skb, struct sk_buff *pskb, int size, int i,
	     int shift)
{
	int off = 0;
	struct page *page = NULL;
	skb_frag_t *frag;

	BUG_ON(i > MAX_SKB_FRAGS);

	/* Prefer reusing tail room in a page already attached to @skb. */
	frag = __lookup_pgfrag_room(skb, size);
	if (frag) {
		page = skb_frag_page(frag);
		off = ss_skb_frag_len(frag);
		/* Take an extra reference for the new fragment's use. */
		__skb_frag_ref(frag);
	} else {
		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return -ENOMEM;
	}

	if (__extend_pgfrags(skb, pskb, i, shift)) {
		/*
		 * NOTE(review): when @frag was found, the reference
		 * taken by __skb_frag_ref() above is not dropped on
		 * this error path — looks like a page reference leak;
		 * verify against the __skb_frag_unref() pattern used
		 * elsewhere in this file.
		 */
		if (!frag)
			__free_page(page);
		return -ENOMEM;
	}

	if (i == MAX_SKB_FRAGS) {
		/*
		 * Insert a new paged fragment right after the last one
		 * in @skb, i.e. as the first fragment of the next skb.
		 */
		skb = skb_shinfo(pskb ? : skb)->frag_list;
		i = 0;
	}
/*
 * Make room for @shift fragments starting with slot @i, and set up a
 * fresh fragment in slot @i capable of holding @size bytes.
 */
static int
__new_pgfrag(struct sk_buff *skb, int size, int i, int shift, TfwStr *it)
{
	skb_frag_t *frag;
	struct page *page;
	int off;

	BUG_ON(i > MAX_SKB_FRAGS);

	/*
	 * Prefer reusing free tail room in an already attached page;
	 * fall back to allocating a brand new page otherwise.
	 */
	frag = __lookup_pgfrag_room(skb, size);
	if (!frag) {
		off = 0;
		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return -ENOMEM;
	} else {
		off = ss_skb_frag_len(frag);
		page = skb_frag_page(frag);
		__skb_frag_ref(frag);	/* get_page(page); */
	}

	/* Make room for @shift fragments starting with slot @i. */
	if (__extend_pgfrags(skb, i, shift, it)) {
		/* Drop whichever reference the setup above acquired. */
		if (frag)
			__skb_frag_unref(frag);	/* put_page(page); */
		else
			__free_page(page);
		return -ENOMEM;
	}

	/*
	 * A slot just past the range of the paged fragment array means
	 * the new fragment becomes the first fragment of the next SKB.
	 */
	if (i == MAX_SKB_FRAGS) {
		skb = it->skb;
		i = 0;
	}

	/* Install the new fragment in slot @i to hold @size bytes. */
	__skb_fill_page_desc(skb, i, page, off, size);
	ss_skb_adjust_data_len(skb, size);

	return 0;
}
/** * Look up a page fragment that has @len bytes of room. */ static skb_frag_t * __lookup_pgfrag_room(struct sk_buff *skb, int len) { int i; /* * Iterate in reverse order to use likely moving fragments. * Thus we find free room more frequently and skb fragments * utilize memory limits better. */ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; --i) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (PAGE_SIZE - ss_skb_frag_len(frag) < len) continue; frag = __check_frag_room(skb, frag, len); if (frag) return frag; } return NULL; }