/*
 * Fast path: process a contiguous run of whole blocks directly. When a
 * bounce buffer (tmp) is provided, the input is copied into it, processed
 * in place, and the result copied back to the destination.
 */
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;

	/* Operate in place when source and destination share a buffer. */
	src = in->data;
	dst = scatterwalk_samebuf(in, out) ? src : out->data;

	if (tmp) {
		memcpy(tmp, in->data, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		memcpy(out->data, tmp, nbytes);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}
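/*
 * A minimal sketch (not part of the original source) of a prfn callback
 * as crypt_fast() expects it. The signature is inferred from the call
 * site above; the name ecb_sketch and the desc->tfm/desc->crfn fields
 * are assumptions made for illustration. It applies the raw block cipher
 * to each full block and returns the number of bytes actually processed.
 */
static unsigned int ecb_sketch(const struct cipher_desc *desc, u8 *dst,
			       const u8 *src, unsigned int nbytes)
{
	const unsigned int bsize = crypto_tfm_alg_blocksize(desc->tfm);
	unsigned int done;

	/* Walk the buffer one block at a time; any partial tail is left over. */
	for (done = 0; nbytes - done >= bsize; done += bsize)
		desc->crfn(crypto_tfm_ctx(desc->tfm), dst + done, src + done);

	return done;
}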
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes, cryptfn_t crfn,
		 procfn_t prfn, int enc, void *info)
{
	struct scatter_walk walk_in, walk_out;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	u8 tmp_src[bsize];
	u8 tmp_dst[bsize];

	if (!nbytes)
		return 0;

	/* Only whole multiples of the cipher block size are valid. */
	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		u8 *src_p, *dst_p;
		int in_place;

		scatterwalk_map(&walk_in);
		scatterwalk_map(&walk_out);

		/* Use the stack buffers when a block straddles a page boundary. */
		src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
		dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
		in_place = scatterwalk_samebuf(&walk_in, &walk_out,
					       src_p, dst_p);

		nbytes -= bsize;

		scatterwalk_copychunks(src_p, &walk_in, bsize, 0);

		prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);

		scatterwalk_done(&walk_in, nbytes);

		scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
		scatterwalk_done(&walk_out, nbytes);

		if (!nbytes)
			return 0;

		crypto_yield(tfm);
	}
}
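/*
 * A minimal sketch of a procfn_t callback for crypt() above, modelled on
 * the ECB case; the signature is inferred from the prfn() call site and
 * the names are assumptions for illustration. ECB ignores the IV (info)
 * and the enc flag here, and needs no special in-place handling, since it
 * just applies the raw cipher function to a single block.
 */
static void ecb_process_sketch(struct crypto_tfm *tfm, u8 *dst, u8 *src,
			       cryptfn_t fn, int enc, void *info,
			       int in_place)
{
	/* One block: run the underlying cipher on the tfm's private context. */
	fn(crypto_tfm_ctx(tfm), dst, src);
}

/*
 * Hypothetical usage: an ECB encrypt entry point would hand crypt() the
 * algorithm's raw encrypt function together with the callback above.
 * The field path tfm->__crt_alg->cra_cipher.cia_encrypt is an assumption.
 */
static int ecb_encrypt_sketch(struct crypto_tfm *tfm,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_encrypt,
		     ecb_process_sketch, 1, NULL /* no IV for ECB */);
}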