/*
 * Make room for @shift fragments starting with slot @i, then set up a new
 * fragment in slot @i that can hold @size bytes. Fragments pushed out of
 * @skb go to the frag_list of @pskb (or of @skb itself when @pskb is NULL).
 */
static int
__new_pgfrag(struct sk_buff *skb, struct sk_buff *pskb, int size, int i,
	     int shift)
{
	int off = 0;
	struct page *page = NULL;
	skb_frag_t *frag;

	BUG_ON(i > MAX_SKB_FRAGS);

	frag = __lookup_pgfrag_room(skb, size);
	if (frag) {
		page = skb_frag_page(frag);
		off = ss_skb_frag_len(frag);
		__skb_frag_ref(frag);
	} else {
		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return -ENOMEM;
	}

	if (__extend_pgfrags(skb, pskb, i, shift)) {
		if (frag)
			__skb_frag_unref(frag);
		else
			__free_page(page);
		return -ENOMEM;
	}

	if (i == MAX_SKB_FRAGS) {
		/*
		 * Insert a new paged fragment right after the last one
		 * in @skb, i.e. as the first fragment of the next skb.
		 */
		skb = skb_shinfo(pskb ? : skb)->frag_list;
		i = 0;
	}

	__skb_fill_page_desc(skb, i, page, off, size);
	ss_skb_adjust_data_len(skb, size);

	return 0;
}
/*
 * Make room for @shift fragments starting with slot @i. Then make
 * a new fragment in slot @i that can hold @size bytes, and set it up.
 */
static int
__new_pgfrag(struct sk_buff *skb, int size, int i, int shift, TfwStr *it)
{
	int off = 0;
	struct page *page = NULL;
	skb_frag_t *frag;

	BUG_ON(i > MAX_SKB_FRAGS);

	/*
	 * Try to find room for @size bytes in SKB fragments.
	 * If none found, then allocate a new page for the fragment.
	 */
	frag = __lookup_pgfrag_room(skb, size);
	if (frag) {
		page = skb_frag_page(frag);
		off = ss_skb_frag_len(frag);
		__skb_frag_ref(frag);	/* get_page(page); */
	} else {
		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return -ENOMEM;
	}

	/* Make room for @shift fragments starting with slot @i. */
	if (__extend_pgfrags(skb, i, shift, it)) {
		if (frag)
			__skb_frag_unref(frag);	/* put_page(page); */
		else
			__free_page(page);
		return -ENOMEM;
	}

	/*
	 * When the requested slot is just past the end of the array of
	 * paged fragments, the new fragment is placed as the first
	 * fragment of the next SKB.
	 */
	if (i == MAX_SKB_FRAGS) {
		i = 0;
		skb = it->skb;
	}

	/* Set up the new fragment in slot @i to hold @size bytes. */
	__skb_fill_page_desc(skb, i, page, off, size);
	ss_skb_adjust_data_len(skb, size);

	return 0;
}
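/*
 * Illustrative sketch only: one way a caller might use __new_pgfrag()
 * above to get @size bytes of paged room at slot @i and find the
 * resulting data pointer. The wrapper name and the way @it is filled in
 * here are assumptions made for the example, not code used elsewhere.
 */
static inline int
__example_new_frag_room(struct sk_buff *skb, int i, int size, TfwStr *it)
{
	int r;

	/* Shift the fragments right by one slot and set up a new one. */
	r = __new_pgfrag(skb, size, i, 1, it);
	if (r)
		return r;

	if (i == MAX_SKB_FRAGS) {
		/* The new fragment went to slot 0 of the next SKB. */
		it->ptr = skb_frag_address(&skb_shinfo(it->skb)->frags[0]);
		it->flags = 1;
	} else {
		it->ptr = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		it->flags = 0;
	}
	return 0;
}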
/**
 * Somewhat like skb_shift().
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS if we need to insert a new
 * fragment after the last one.
 */
static int
__extend_pgfrags(struct sk_buff *skb, struct sk_buff *pskb, int from, int n)
{
	int i, n_frag = 0;
	struct skb_shared_info *psi, *si = skb_shinfo(skb);

	/* No room for @n extra fragments: move the excess to a child SKB. */
	if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS - n) {
		skb_frag_t *f;
		struct sk_buff *skb_frag;

		psi = pskb ? skb_shinfo(pskb) : si;
		skb_frag = psi->frag_list;
		n_frag = skb_shinfo(skb)->nr_frags + n - MAX_SKB_FRAGS;

		/*
		 * Reuse the first SKB of the frag_list if it carries only
		 * paged data and has room for @n_frag more fragments.
		 * Otherwise, insert a fresh SKB at the head of the list.
		 */
		if (skb_frag && !skb_headlen(skb_frag)
		    && skb_shinfo(skb_frag)->nr_frags <= MAX_SKB_FRAGS - n_frag)
		{
			int r = __extend_pgfrags(skb_frag, NULL, 0, n_frag);
			if (r)
				return r;
		} else {
			skb_frag = alloc_skb(0, GFP_ATOMIC);
			if (!skb_frag)
				return -ENOMEM;
			skb_frag->next = psi->frag_list;
			psi->frag_list = skb_frag;
		}

		/*
		 * Move the trailing fragments that no longer fit in @skb
		 * to the child SKB, adjusting data lengths on both sides.
		 */
		for (i = n_frag - 1;
		     i >= 0 && MAX_SKB_FRAGS - n + i >= from; --i)
		{
			f = &si->frags[MAX_SKB_FRAGS - n + i];
			skb_shinfo(skb_frag)->frags[i] = *f;
			ss_skb_adjust_data_len(skb, -skb_frag_size(f));
			ss_skb_adjust_data_len(skb_frag, skb_frag_size(f));
		}
		skb_shinfo(skb_frag)->nr_frags += n_frag;
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_frag->ip_summed = CHECKSUM_PARTIAL;
	}

	/* Shift the remaining fragments right by @n slots to open @from. */
	memmove(&si->frags[from + n], &si->frags[from],
		(si->nr_frags - from - n_frag) * sizeof(skb_frag_t));
	si->nr_frags += n - n_frag;

	return 0;
}
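/*
 * Illustrative sketch only: the overflow behaviour of __extend_pgfrags()
 * above. When the fragment array of @skb is already full, asking for one
 * more slot pushes the last fragment out to the frag_list SKB (taken from
 * @pskb when it is given, otherwise from @skb). The wrapper below exists
 * only for the example.
 */
static inline int
__example_extend_full(struct sk_buff *skb, struct sk_buff *pskb)
{
	int r;
	int old_nr = skb_shinfo(skb)->nr_frags;

	/* Open one slot at the very beginning of the fragment array. */
	r = __extend_pgfrags(skb, pskb, 0, 1);
	if (r)
		return r;

	/*
	 * If @skb was full, the displaced fragment now lives in the
	 * frag_list SKB and the fragment count of @skb is unchanged.
	 */
	SS_DBG("[%d]: %s: nr_frags %d -> %d\n", smp_processor_id(),
	       __func__, old_nr, skb_shinfo(skb)->nr_frags);
	return 0;
}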
/**
 * Delete @len (the value is positive now) bytes from @frag.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to data after the deleted area in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in it->skb.
 */
static int
__split_pgfrag_del(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	struct skb_shared_info *si = skb_shinfo(skb);

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
	       smp_processor_id(), __func__,
	       skb, i, off, len, skb_frag_size(frag));

	if (unlikely(off + len > skb_frag_size(frag))) {
		SS_WARN("Attempt to delete too much\n");
		return -EFAULT;
	}

	/* Fast path: delete a full fragment. */
	if (!off && len == skb_frag_size(frag)) {
		ss_skb_adjust_data_len(skb, -len);
		__skb_frag_unref(frag);
		if (i + 1 < si->nr_frags)
			memmove(&si->frags[i], &si->frags[i + 1],
				(si->nr_frags - i - 1) * sizeof(skb_frag_t));
		--si->nr_frags;
		goto lookup_next_ptr;
	}
	/* Fast path: delete the head part of a fragment. */
	if (!off) {
		frag->page_offset += len;
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		it->ptr = skb_frag_address(frag);
		return 0;
	}
	/* Fast path: delete the tail part of a fragment. */
	if (off + len == skb_frag_size(frag)) {
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		++i;
		goto lookup_next_ptr;
	}

	/*
	 * Delete data in the middle of a fragment. After the data
	 * is deleted the fragment will contain only the head part,
	 * and the tail part is moved to another fragment.
	 * [frag @i] [frag @i+1 - tail data]
	 *
	 * Make room for a fragment right after the @i fragment
	 * to move the tail part of data there.
	 */
	if (__extend_pgfrags(skb, i + 1, 1, it))
		return -EFAULT;

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 1) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off - len;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, len + tail_len);

	/* Make the fragment with the tail part. */
	i = (i + 1) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off + len, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	ss_skb_adjust_data_len(skb, -len);
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address of data after the deleted area. */
	it->flags = (skb != skb_dst);
	it->ptr = skb_frag_address(&skb_shinfo(skb_dst)->frags[i]);
	return 0;

lookup_next_ptr:
	/* Get the next fragment after the deleted fragment. */
	if (i < si->nr_frags)
		it->ptr = skb_frag_address(&si->frags[i]);
	return 0;
}
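/*
 * Illustrative sketch only: deleting @len bytes at offset @off inside
 * paged fragment @i of @skb with __split_pgfrag_del() above, then picking
 * up the data that followed the deleted bytes. The wrapper and the debug
 * message are assumptions made for the example.
 */
static inline int
__example_frag_del(struct sk_buff *skb, int i, int off, int len)
{
	TfwStr it = {};
	int r;

	r = __split_pgfrag_del(skb, i, off, len, &it);
	if (r)
		return r;

	/*
	 * @it.ptr, when set, points right after the deleted area;
	 * @it.flags is set when that data lives in @it.skb rather
	 * than in @skb.
	 */
	if (it.ptr)
		SS_DBG("[%d]: %s: next data [%p] in %s SKB\n",
		       smp_processor_id(), __func__, it.ptr,
		       it.flags ? "the next" : "this");
	return 0;
}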
/**
 * The kernel may allocate a bit more memory for an SKB than what was
 * requested (see ksize() call in __alloc_skb()). Use that extra memory
 * to hold new data if the insertion point is at the end of the linear
 * part and @len fits in the tail room. Otherwise, the new data is placed
 * in a paged fragment, and the tail of the linear part, if any, is moved
 * to another fragment.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to the room for new data in @it->ptr if making room.
 * @return pointer to data right after the deleted area in @it->ptr.
 */
static int
__split_linear_data(struct sk_buff *skb, char *pspt, int len, TfwStr *it)
{
	int alloc = len > 0;
	int tail_len = (char *)skb_tail_pointer(skb) - pspt;
	struct page *page = virt_to_head_page(skb->head);

	SS_DBG("[%d]: %s: skb [%p] pspt [%p] len [%d] tail_len [%d]\n",
	       smp_processor_id(), __func__, skb, pspt, len, tail_len);
	BUG_ON(!skb->head_frag);
	BUG_ON(tail_len < 0);
	BUG_ON(!(alloc | tail_len));
	BUG_ON(-len > tail_len);

	/*
	 * Quick and unlikely path: just advance the skb tail pointer.
	 * Note that this only works when we make room. When we remove,
	 * pspt points at the start of the data chunk to remove. In that
	 * case, tail_len can never be zero.
	 */
	if (unlikely(!tail_len && len <= ss_skb_tailroom(skb))) {
		BUG_ON(len < 0);
		it->ptr = ss_skb_put(skb, len);
		return 0;
	}
	/*
	 * Quick and unlikely path: just move the skb tail pointer backward.
	 * Note that this only works when we remove data, and the data
	 * is located exactly at the end of the linear part of an skb.
	 */
	if (unlikely((len < 0) && (tail_len == -len))) {
		ss_skb_put(skb, len);
		if (skb_is_nonlinear(skb))
			it->ptr = skb_frag_address(&skb_shinfo(skb)->frags[0]);
		return 0;
	}

	/*
	 * Data is inserted or deleted in the middle of the linear part,
	 * or there's insufficient room in the linear part of an SKB to
	 * insert @len bytes.
	 *
	 * Don't bother with skb tail room: if the linear part is large,
	 * then it's likely that we'll do some smaller data insertions
	 * later and go by the quick path above. Otherwise, the tail size
	 * is also small.
	 *
	 * The inserted data is placed in a fragment. The tail part is
	 * moved to yet another fragment. The linear part is trimmed to
	 * exclude the deleted data and the tail part.
	 *
	 * Do all allocations before moving the fragments to avoid complex
	 * rollback.
	 */
	if (alloc) {
		if (__new_pgfrag(skb, len, 0, alloc + !!tail_len, it))
			return -EFAULT;
	} else {
		if (__extend_pgfrags(skb, 0, 1, it))
			return -EFAULT;
		tail_len += len;	/* @len is negative. */
	}

	if (tail_len) {
		int tail_off = pspt - (char *)page_address(page);

		/*
		 * Trim the linear part by |@len| bytes if data
		 * is deleted. Then trim it further to exclude
		 * the tail data. Finally, set up the fragment
		 * allotted above with the tail data.
		 */
		if (len < 0) {
			tail_off -= len;
			skb->tail += len;
			skb->len += len;
		}
		skb->tail -= tail_len;
		skb->data_len += tail_len;
		skb->truesize += tail_len;

		__skb_fill_page_desc(skb, alloc, page, tail_off, tail_len);
		skb_frag_ref(skb, alloc);	/* get_page(page); */
	}

	it->ptr = skb_frag_address(&skb_shinfo(skb)->frags[0]);
	return 0;
}
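/*
 * Illustrative sketch only: making room for @len bytes right before the
 * byte that @split points to in the linear part of @skb, then filling
 * that room from @src. The wrapper and the memcpy() of caller data are
 * assumptions made for the example; real callers write protocol data
 * into the room returned in @it.ptr.
 */
static inline int
__example_linear_insert(struct sk_buff *skb, char *split,
			const char *src, int len)
{
	TfwStr it = {};
	int r;

	BUG_ON(len <= 0);

	r = __split_linear_data(skb, split, len, &it);
	if (r)
		return r;

	/* @it.ptr is the start of the freshly made room of @len bytes. */
	memcpy(it.ptr, src, len);
	return 0;
}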