int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov) { unsigned int csum; int chunk = skb->len - hlen; /* Skip filled elements. Pretty silly, look at memcpy_toiovec, though 8) */ while (iov->iov_len == 0) iov++; if (iov->iov_len < chunk) { if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk+hlen, skb->csum))) goto csum_error; if (skb_copy_datagram_iovec(skb, hlen, iov, chunk)) goto fault; } else { csum = csum_partial(skb->data, hlen, skb->csum); if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum)) goto fault; if ((unsigned short)csum_fold(csum)) goto csum_error; iov->iov_len -= chunk; iov->iov_base += chunk; } return 0; csum_error: return -EINVAL; fault: return -EFAULT; }
/** * skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec. * @skb: skbuff * @hlen: hardware length * @iov: io vector * * Caller _must_ check that skb will fit to this iovec. * * Returns: 0 - success. * -EINVAL - checksum failure. * -EFAULT - fault during copy. Beware, in this case iovec * can be modified! */ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov) { __wsum csum; int chunk = skb->len - hlen; /* Skip filled elements. * Pretty silly, look at memcpy_toiovec, though 8) */ while (!iov->iov_len) iov++; if (iov->iov_len < chunk) { if (__skb_checksum_complete(skb)) goto csum_error; if (skb_copy_datagram_iovec(skb, hlen, iov, chunk)) goto fault; } else { csum = csum_partial(skb->data, hlen, skb->csum); if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum)) goto fault; if (csum_fold(csum)) goto csum_error; if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); iov->iov_len -= chunk; iov->iov_base += chunk; } return 0; csum_error: return -EINVAL; fault: return -EFAULT; }
/*
 * skb_copy_and_csum_datagram - copy @len bytes of datagram payload to a
 * user buffer while accumulating the Internet checksum of the copied data.
 * @skb:    buffer to copy from
 * @offset: offset into the datagram to start copying at
 * @to:     destination user buffer
 * @len:    number of bytes to copy
 * @csump:  in/out running checksum, updated with the copied bytes
 *
 * Walks the three places payload bytes can live -- the linear header area,
 * the paged fragments, and the frag_list of chained skbs -- copying and
 * checksumming each overlapping piece in turn.  @pos tracks how many bytes
 * have been copied so far and is fed to csum_block_add() so partial sums
 * are combined at the correct byte offset (16-bit fold parity).
 *
 * Returns 0 on success, -EFAULT if a user copy faults.  On fault the
 * destination and *@csump may have been partially updated.
 */
int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump)
{
	int i, copy;
	/* Number of bytes in the linear (non-paged) part of the skb. */
	int start = skb->len - skb->data_len;
	/* Bytes copied so far; offset passed to csum_block_add(). */
	int pos = 0;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		/* Copy + checksum the linear area in one pass; err is set
		 * on a user-space fault.
		 */
		*csump = csum_and_copy_to_user(skb->data+offset, to, copy, *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	/* Copy any requested bytes that live in the paged fragments. */
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		/* The requested range must not start beyond this point. */
		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		/* Does [offset, offset+len) overlap this fragment? */
		if ((copy = end-offset) > 0) {
			unsigned int csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			/* Map the fragment page (may be highmem) while we
			 * copy out of it.
			 */
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr + frag->page_offset + offset-start, to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			/* Fold this fragment's sum in at byte offset pos. */
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	/* Finally, recurse into any chained skbs on the frag_list. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				unsigned int csum2 = 0;
				if (copy > len)
					copy = len;
				/* Recurse: each chained skb has its own
				 * linear area, frags and frag_list.
				 */
				if (skb_copy_and_csum_datagram(list, offset-start, to, copy, &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	/* Success only if the full requested length was consumed. */
	if (len == 0)
		return 0;

fault:
	return -EFAULT;
}