static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) { int err, plen, dlen; struct iphdr *iph; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch = ipcd->scratch; plen = skb->len; dlen = IPCOMP_SCRATCH_SIZE; start = skb->data; err = crypto_comp_decompress(ipcd->tfm, start, plen, scratch, &dlen); if (err) goto out; if (dlen < (plen + sizeof(struct ip_comp_hdr))) { err = -EINVAL; goto out; } err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); if (err) goto out; skb_put(skb, dlen - plen); memcpy(skb->data, scratch, dlen); iph = skb->nh.iph; iph->tot_len = htons(dlen + iph->ihl * 4); out: return err; }
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) { struct ipcomp_data *ipcd = x->data; const int plen = skb->len; int dlen = IPCOMP_SCRATCH_SIZE; const u8 *start = skb->data; const int cpu = get_cpu(); u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); int len; if (err) goto out; if (dlen < (plen + sizeof(struct ip_comp_hdr))) { err = -EINVAL; goto out; } len = dlen - plen; if (len > skb_tailroom(skb)) len = skb_tailroom(skb); __skb_put(skb, len); len += plen; skb_copy_to_linear_data(skb, scratch, len); while ((scratch += len, dlen -= len) > 0) { skb_frag_t *frag; <<<<<<< HEAD struct page *page; ======= <<<<<<< HEAD
/*
 * Decompress @src (@src_len bytes) into @dst using the stream's tfm.
 *
 * The destination buffer is always exactly one page.  Returns the
 * result of crypto_comp_decompress() (0 on success, negative errno on
 * failure).
 */
int zcomp_decompress(struct zcomp_strm *zstrm, const void *src,
		     unsigned int src_len, void *dst)
{
	unsigned int out_len = PAGE_SIZE;

	return crypto_comp_decompress(zstrm->tfm, src, src_len, dst, &out_len);
}
static void decompress_record(struct pstore_record *record) { int ret; int unzipped_len; char *unzipped, *workspace; if (!record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. */ if (record->type != PSTORE_TYPE_DMESG) { pr_warn("ignored compressed record type %d\n", record->type); return; } /* Missing compression buffer means compression was not initialized. */ if (!big_oops_buf) { pr_warn("no decompression method initialized!\n"); return; } /* Allocate enough space to hold max decompression and ECC. */ unzipped_len = big_oops_buf_sz; workspace = kmalloc(unzipped_len + record->ecc_notice_size, GFP_KERNEL); if (!workspace) return; /* After decompression "unzipped_len" is almost certainly smaller. */ ret = crypto_comp_decompress(tfm, record->buf, record->size, workspace, &unzipped_len); if (ret) { pr_err("crypto_comp_decompress failed, ret = %d!\n", ret); kfree(workspace); return; } /* Append ECC notice to decompressed buffer. */ memcpy(workspace + unzipped_len, record->buf + record->size, record->ecc_notice_size); /* Copy decompressed contents into an minimum-sized allocation. */ unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size, GFP_KERNEL); kfree(workspace); if (!unzipped) return; /* Swap out compressed contents with decompressed contents. */ kfree(record->buf); record->buf = unzipped; record->size = unzipped_len; record->compressed = false; }
/*
 * Run a zswap (de)compression on this CPU's transform.
 *
 * Pins the current CPU for the duration of the call so the per-CPU tfm
 * cannot change under us.  Returns 0 on success, a negative errno from
 * the crypto layer, or -EINVAL for an unknown @op.
 */
static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());

	if (op == ZSWAP_COMPOP_COMPRESS)
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
	else if (op == ZSWAP_COMPOP_DECOMPRESS)
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
	else
		ret = -EINVAL;

	put_cpu();
	return ret;
}
/*
 * Run a zcache (de)compression on this CPU's transform.
 *
 * FIX: the switch had no default case, so an unexpected @op left "ret"
 * uninitialized and returned indeterminate garbage (undefined
 * behavior).  Now returns -EINVAL for unknown ops, matching
 * zswap_comp_op().
 */
static inline int zcache_comp_op(enum comp_op op,
				 const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	BUG_ON(!zcache_comp_pcpu_tfms);
	/* get_cpu() pins us so the per-CPU tfm stays ours until put_cpu(). */
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	put_cpu();
	return ret;
}
/*
 * Decompress @in_buf (@in_len bytes) into @out_buf using @compr_type.
 *
 * On entry *out_len holds the output buffer capacity; on return it
 * holds the number of decompressed bytes.  SCFS_COMP_NONE is a plain
 * copy.  Returns 0 on success or a negative errno.
 */
int scfs_decompress_crypto(const void *in_buf, size_t in_len, void *out_buf,
			   size_t *out_len, int compr_type)
{
	struct scfs_compressor *compr;
	unsigned int capi_len;
	int ret;

	if (unlikely(compr_type < 0 || compr_type >= SCFS_COMP_TOTAL_TYPES)) {
		SCFS_PRINT_ERROR("invalid compression type %d", compr_type);
		return -EINVAL;
	}

	compr = scfs_compressors[compr_type];
	if (unlikely(!compr->capi_name)) {
		SCFS_PRINT_ERROR("%s compression is not compiled in",
				 compr->name);
		return -EINVAL;
	}

	/* "None" compression is just a straight copy. */
	if (compr_type == SCFS_COMP_NONE) {
		memcpy(out_buf, in_buf, in_len);
		*out_len = in_len;
		return 0;
	}

	if (compr->decomp_mutex)
		mutex_lock(compr->decomp_mutex);
	/* The crypto API wants an unsigned int length; round-trip it. */
	capi_len = (unsigned int)*out_len;
	ret = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf,
				     &capi_len);
	*out_len = (size_t)capi_len;
	if (compr->decomp_mutex)
		mutex_unlock(compr->decomp_mutex);

	if (ret)
		SCFS_PRINT_ERROR("cannot decompress %d bytes, compressor %s, "
				 "error %d", in_len, compr->name, ret);

	return ret;
}
int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, int *out_len, int compr_type) { int err; struct ubifs_compressor *compr; if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { ubifs_err("invalid compression type %d", compr_type); return -EINVAL; } compr = ubifs_compressors[compr_type]; if (unlikely(!compr->capi_name)) { ubifs_err("%s compression is not compiled in", compr->name); return -EINVAL; } if (compr_type == UBIFS_COMPR_NONE) { memcpy(out_buf, in_buf, in_len); *out_len = in_len; return 0; } if (compr->decomp_mutex) mutex_lock(compr->decomp_mutex); err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, (unsigned int *)out_len); if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) ubifs_err("cannot decompress %d bytes, compressor %s, " "error %d", in_len, compr->name, err); return err; }
static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) { int err = 0; u8 nexthdr = 0; u8 *prevhdr; int hdr_len = skb->h.raw - skb->nh.raw; unsigned char *tmp_hdr = NULL; struct ipv6hdr *iph; int plen, dlen; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch = ipcd->scratch; if ((skb_is_nonlinear(skb) || skb_cloned(skb)) && skb_linearize(skb, GFP_ATOMIC) != 0) { err = -ENOMEM; goto out; } skb->ip_summed = CHECKSUM_NONE; /* Remove ipcomp header and decompress original payload */ iph = skb->nh.ipv6h; tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC); if (!tmp_hdr) goto out; memcpy(tmp_hdr, iph, hdr_len); nexthdr = *(u8 *)skb->data; skb_pull(skb, sizeof(struct ipv6_comp_hdr)); skb->nh.raw += sizeof(struct ipv6_comp_hdr); memcpy(skb->nh.raw, tmp_hdr, hdr_len); iph = skb->nh.ipv6h; iph->payload_len = htons(ntohs(iph->payload_len) - sizeof(struct ipv6_comp_hdr)); skb->h.raw = skb->data; /* decompression */ plen = skb->len; dlen = IPCOMP_SCRATCH_SIZE; start = skb->data; err = crypto_comp_decompress(ipcd->tfm, start, plen, scratch, &dlen); if (err) { err = -EINVAL; goto out; } if (dlen < (plen + sizeof(struct ipv6_comp_hdr))) { err = -EINVAL; goto out; } err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); if (err) { goto out; } skb_put(skb, dlen - plen); memcpy(skb->data, scratch, dlen); iph = skb->nh.ipv6h; iph->payload_len = htons(skb->len); ip6_find_1stfragopt(skb, &prevhdr); *prevhdr = nexthdr; out: if (tmp_hdr) kfree(tmp_hdr); if (err) goto error_out; return nexthdr; error_out: return err; }
/*
 * Decompress the IPComp payload of @skb in place (per-CPU variant).
 *
 * Uses this CPU's scratch buffer and crypto transform (get_cpu() pins
 * the CPU until the final put_cpu()).  The decompressed bytes are first
 * copied into the skb's linear tailroom; anything that does not fit is
 * attached as freshly-allocated page fragments.
 *
 * Returns 0 on success, or a negative errno (-EINVAL for a payload
 * that did not expand enough to be valid IPComp, -EMSGSIZE when out of
 * frag slots, -ENOMEM on page allocation failure, or the crypto
 * layer's error).
 */
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	/* A valid IPComp payload must expand past the saved header room. */
	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	/* Fill the linear area first, capped at the available tailroom. */
	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	/*
	 * Whatever remains goes into page fragments.  The comma
	 * expression advances the scratch cursor and shrinks the
	 * remaining byte count in lock-step each iteration.
	 */
	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		/* Each frag holds at most one page of decompressed data. */
		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		/* Keep the skb byte accounting in sync with the new frag. */
		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}