/** * Somewhat like skb_shift(). * * Beware: @from can be equal to MAX_SKB_FRAGS if we need to insert a new * fragment after the last one. */ static int __extend_pgfrags(struct sk_buff *skb, struct sk_buff *pskb, int from, int n) { int i, n_frag = 0; struct skb_shared_info *psi, *si = skb_shinfo(skb); if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS - n) { skb_frag_t *f; struct sk_buff *skb_frag; psi = pskb ? skb_shinfo(pskb) : si; skb_frag = psi->frag_list; n_frag = skb_shinfo(skb)->nr_frags + n - MAX_SKB_FRAGS; if (skb_frag && !skb_headlen(skb_frag) && skb_shinfo(skb_frag)->nr_frags <= MAX_SKB_FRAGS - n_frag) { int r = __extend_pgfrags(skb_frag, NULL, 0, n_frag); if (r) return r; } else { skb_frag = alloc_skb(0, GFP_ATOMIC); if (!skb_frag) return -ENOMEM; skb_frag->next = psi->frag_list; psi->frag_list = skb_frag; } for (i = n_frag - 1; i >= 0 && MAX_SKB_FRAGS - n + i >= from; --i) { f = &si->frags[MAX_SKB_FRAGS - n + i]; skb_shinfo(skb_frag)->frags[i] = *f; ss_skb_adjust_data_len(skb, -skb_frag_size(f)); ss_skb_adjust_data_len(skb_frag, skb_frag_size(f)); } skb_shinfo(skb_frag)->nr_frags += n_frag; skb->ip_summed = CHECKSUM_PARTIAL; skb_frag->ip_summed = CHECKSUM_PARTIAL; } memmove(&si->frags[from + n], &si->frags[from], (si->nr_frags - from - n_frag) * sizeof(skb_frag_t)); si->nr_frags += n - n_frag; return 0; }
/**
 * Somewhat like skb_shift().
 * Make room for @n fragments starting with slot @from.
 *
 * Beware: @from can be equal to MAX_SKB_FRAGS when a new fragment
 * is inserted after the last one.
 *
 * @return 0 on success, -errno on failure.
 * @return New SKB in @it->skb if new SKB is allocated.
 */
static int
__extend_pgfrags(struct sk_buff *skb, int from, int n, TfwStr *it)
{
	int i, n_shift, n_excess = 0;
	struct skb_shared_info *si = skb_shinfo(skb);

	BUG_ON(from > si->nr_frags);

	/* No room for @n extra page fragments in the SKB. */
	if (si->nr_frags + n > MAX_SKB_FRAGS) {
		skb_frag_t *f;
		struct sk_buff *nskb;

		/* Allocate a new SKB to hold @n_excess page fragments. */
		nskb = alloc_skb(0, GFP_ATOMIC);
		if (nskb == NULL)
			return -ENOMEM;

		/*
		 * The number of page fragments that don't fit in the SKB
		 * after the room is prepared for @n page fragments.
		 */
		n_excess = si->nr_frags + n - MAX_SKB_FRAGS;

		/*
		 * Shift @n_excess number of page fragments to new SKB.
		 * The byte counts of both SKBs are adjusted as each
		 * fragment moves over.
		 *
		 * NOTE(review): when @from == si->nr_frags no fragments
		 * are copied, yet nskb's nr_frags is still bumped by
		 * @n_excess below — presumably the caller fills those
		 * slots (see __new_pgfrag()); confirm against callers.
		 */
		if (from < si->nr_frags) {
			for (i = n_excess - 1; i >= 0; --i) {
				f = &si->frags[MAX_SKB_FRAGS - n + i];
				skb_shinfo(nskb)->frags[i] = *f;
				ss_skb_adjust_data_len(skb, -skb_frag_size(f));
				ss_skb_adjust_data_len(nskb, skb_frag_size(f));
			}
		}
		skb_shinfo(nskb)->nr_frags += n_excess;
		/* Hand the new SKB back to the caller. */
		it->skb = nskb;
	}

	/*
	 * Make room for @n page fragments in the SKB: slide the
	 * fragments that stay behind forward by @n slots.
	 */
	n_shift = si->nr_frags - from - n_excess;
	BUG_ON(n_shift < 0);
	if (n_shift)
		memmove(&si->frags[from + n],
			&si->frags[from], n_shift * sizeof(skb_frag_t));
	si->nr_frags += n - n_excess;

	return 0;
}
/*
 * Make room for @shift fragments starting with slot @i, then set up
 * a new fragment in slot @i that can hold @size bytes of data.
 */
static int
__new_pgfrag(struct sk_buff *skb, int size, int i, int shift, TfwStr *it)
{
	int pg_off = 0;
	skb_frag_t *room;
	struct page *pg;

	BUG_ON(i > MAX_SKB_FRAGS);

	/*
	 * Reuse spare space on an existing fragment's page when one
	 * has @size free bytes; otherwise back the new fragment with
	 * a freshly allocated page.
	 */
	room = __lookup_pgfrag_room(skb, size);
	if (room == NULL) {
		pg = alloc_page(GFP_ATOMIC);
		if (pg == NULL)
			return -ENOMEM;
	} else {
		pg = skb_frag_page(room);
		pg_off = ss_skb_frag_len(room);
		__skb_frag_ref(room);		/* get_page(pg); */
	}

	/* Open @shift slots at position @i; undo on failure. */
	if (__extend_pgfrags(skb, i, shift, it)) {
		if (room == NULL)
			__free_page(pg);
		else
			__skb_frag_unref(room);	/* put_page(pg); */
		return -ENOMEM;
	}

	/*
	 * A slot just past the fragment array means the new fragment
	 * goes in as the first fragment of the newly allocated SKB.
	 */
	if (i == MAX_SKB_FRAGS) {
		skb = it->skb;
		i = 0;
	}

	/* Install the new fragment and account for its bytes. */
	__skb_fill_page_desc(skb, i, pg, pg_off, size);
	ss_skb_adjust_data_len(skb, size);

	return 0;
}
/**
 * Delete @len (the value is positive now) bytes from @frag.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to data after the deleted area in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in it->skb.
 */
static int
__split_pgfrag_del(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int r, tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	struct skb_shared_info *si = skb_shinfo(skb);

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
	       smp_processor_id(), __func__,
	       skb, i, off, len, skb_frag_size(frag));

	if (unlikely(off + len > skb_frag_size(frag))) {
		SS_WARN("Attempt to delete too much\n");
		return -EFAULT;
	}

	/* Fast path: delete a full fragment. */
	if (!off && len == skb_frag_size(frag)) {
		ss_skb_adjust_data_len(skb, -len);
		__skb_frag_unref(frag);
		/* Close the hole left by the removed fragment. */
		if (i + 1 < si->nr_frags)
			memmove(&si->frags[i], &si->frags[i + 1],
				(si->nr_frags - i - 1) * sizeof(skb_frag_t));
		--si->nr_frags;
		goto lookup_next_ptr;
	}
	/* Fast path: delete the head part of a fragment. */
	if (!off) {
		frag->page_offset += len;
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		it->ptr = skb_frag_address(frag);
		return 0;
	}
	/* Fast path: delete the tail part of a fragment. */
	if (off + len == skb_frag_size(frag)) {
		skb_frag_size_sub(frag, len);
		ss_skb_adjust_data_len(skb, -len);
		++i;
		goto lookup_next_ptr;
	}

	/*
	 * Delete data in the middle of a fragment. After the data
	 * is deleted the fragment will contain only the head part,
	 * and the tail part is moved to another fragment.
	 * [frag @i] [frag @i+1 - tail data]
	 *
	 * Make room for a fragment right after the @i fragment
	 * to move the tail part of data there.
	 *
	 * Propagate the callee's error code instead of masking it
	 * as -EFAULT: the likely failure here is -ENOMEM from the
	 * SKB allocation inside __extend_pgfrags().
	 */
	r = __extend_pgfrags(skb, i + 1, 1, it);
	if (r)
		return r;

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 1) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off - len;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, len + tail_len);

	/*
	 * Make the fragment with the tail part. It shares the same
	 * page as fragment @i, so take an extra page reference.
	 */
	i = (i + 1) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off + len, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	ss_skb_adjust_data_len(skb, -len);
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address of data after the deleted area. */
	it->flags = (skb != skb_dst);
	it->ptr = skb_frag_address(&skb_shinfo(skb_dst)->frags[i]);
	return 0;

lookup_next_ptr:
	/* Get the next fragment after the deleted fragment. */
	if (i < si->nr_frags)
		it->ptr = skb_frag_address(&si->frags[i]);
	return 0;
}
/**
 * Get room for @len bytes of data starting from offset @off
 * in fragment @i.
 *
 * The room may be found in the preceding fragment if @off is zero.
 * Otherwise, a new fragment is allocated and fragments around the
 * fragment @i are rearranged so that data is not actually split
 * and copied.
 *
 * Note: @off is always within the borders of fragment @i. It can
 * point at the start of a fragment, but it can never point at the
 * location right after the end of a fragment. In other words, @off
 * can be zero, but it can not be equal to the size of fragment @i.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to the room for new data in @it->ptr.
 * @return @it->flags is set if @it->ptr points to data in it->skb.
 */
static int
__split_pgfrag_add(struct sk_buff *skb, int i, int off, int len, TfwStr *it)
{
	int r, tail_len;
	struct sk_buff *skb_dst;
	skb_frag_t *frag_dst, *frag = &skb_shinfo(skb)->frags[i];

	SS_DBG("[%d]: %s: skb [%p] i [%d] off [%d] len [%d] fragsize [%d]\n",
	       smp_processor_id(), __func__,
	       skb, i, off, len, skb_frag_size(frag));

	/*
	 * If @off is zero and there's a preceding page fragment,
	 * then try to append data to that fragment. Go for other
	 * solutions if there's no room.
	 */
	if (!off && i) {
		frag_dst = __check_frag_room(skb, frag - 1, len);
		if (frag_dst) {
			/* Coalesce new data with the fragment. */
			off = skb_frag_size(frag_dst);
			skb_frag_size_add(frag_dst, len);
			ss_skb_adjust_data_len(skb, len);
			it->ptr = (char *)skb_frag_address(frag_dst) + off;
			return 0;
		}
	}

	/*
	 * Make a fragment that can hold @len bytes. If @off is
	 * zero, then data is added at the start of fragment @i.
	 * Make a fragment in slot @i, and the original fragment
	 * is shifted forward. If @off is not zero, then make
	 * a fragment in slot @i+1, and make an extra fragment
	 * in slot @i+2 to hold the tail data.
	 *
	 * Propagate the callee's error code instead of masking it
	 * as -EFAULT: __new_pgfrag() reports -ENOMEM on allocation
	 * failure.
	 */
	r = __new_pgfrag(skb, len, i + !!off, 1 + !!off, it);
	if (r)
		return r;

	/* If @off is zero, the job is done in __new_pgfrag(). */
	if (!off) {
		it->ptr = skb_frag_address(frag);
		return 0;
	}

	/*
	 * If data is added in the middle of a fragment, then split
	 * the fragment. The head of the fragment stays there, and
	 * the tail of the fragment is moved to a new fragment.
	 * The fragment for new data is placed in between.
	 * [frag @i] [frag @i+1 - new data] [frag @i+2 - tail data]
	 * If @i is close to MAX_SKB_FRAGS, then new fragments may
	 * be located in another SKB.
	 */

	/* Find the SKB for tail data. */
	skb_dst = (i < MAX_SKB_FRAGS - 2) ? skb : it->skb;

	/* Calculate the length of the tail part. */
	tail_len = skb_frag_size(frag) - off;

	/* Trim the fragment with the head part. */
	skb_frag_size_sub(frag, tail_len);

	/*
	 * Make the fragment with the tail part. It shares the same
	 * page as fragment @i, so take an extra page reference.
	 */
	i = (i + 2) % MAX_SKB_FRAGS;
	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
			     frag->page_offset + off, tail_len);
	__skb_frag_ref(frag);

	/* Adjust SKB data lengths. */
	if (skb != skb_dst) {
		ss_skb_adjust_data_len(skb, -tail_len);
		ss_skb_adjust_data_len(skb_dst, tail_len);
	}

	/* Get the SKB and the address for new data. */
	it->flags = !(i < MAX_SKB_FRAGS - 1);
	frag_dst = it->flags ? &skb_shinfo(it->skb)->frags[0] : frag + 1;
	it->ptr = skb_frag_address(frag_dst);
	return 0;
}