static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	/*
	 * The qce hardware only handles 128- and 256-bit AES keys, so other
	 * key sizes are routed synchronously through the software fallback.
	 */
	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
/*
 * Encrypt/decrypt big_key data
 */
static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
{
	int ret = -EINVAL;
	struct scatterlist sgio;
	SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);

	if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
		ret = -EAGAIN;
		goto error;
	}

	skcipher_request_set_tfm(req, big_key_skcipher);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	sg_init_one(&sgio, data, datalen);
	skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);

	if (op == BIG_KEY_ENC)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

error:
	return ret;
}
static int do_page_crypto(struct inode *inode,
			  fscrypt_direction_t rw, pgoff_t index,
			  struct page *src_page, struct page *dest_page,
			  gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(index);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_skcipher_encrypt() returned %d\n",
				   __func__, res);
		return res;
	}
	return 0;
}
/*
 * ext4_fname_decrypt()
 *	This function decrypts the input filename, and returns
 *	the length of the plaintext.
 *	Errors are returned as negative numbers.
 *	We trust the caller to allocate sufficient memory to oname string.
 */
static int ext4_fname_decrypt(struct inode *inode,
			      const struct ext4_str *iname,
			      struct ext4_str *oname)
{
	struct ext4_str tmp_in[2], tmp_out[1];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[EXT4_CRYPTO_BLOCK_SIZE];
	unsigned lim = max_name_len(inode);

	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	tmp_in[0].name = iname->name;
	tmp_in[0].len = iname->len;
	tmp_out[0].name = oname->name;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_request_alloc() failed\n", __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_dir_crypt_complete, &ecr);

	/* Initialize IV */
	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
	res = crypto_skcipher_decrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
			"%s: Error in ext4_fname_decrypt (error code %d)\n",
			__func__, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return oname->len;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src,
			    unsigned int nbytes, int enc)
{
	int ret;
	u8 tweak[AES_BLOCK_SIZE];
	u8 *iv;
	struct blkcipher_walk walk;
	struct p8_aes_xts_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		/* VSX state cannot be used in interrupt context; fall back */
		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = enc ? crypto_skcipher_encrypt(req) :
			    crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

		blkcipher_walk_init(&walk, dst, src, nbytes);

		ret = blkcipher_walk_virt(desc, &walk);
		iv = walk.iv;
		memset(tweak, 0, AES_BLOCK_SIZE);
		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);

		while ((nbytes = walk.nbytes)) {
			if (enc)
				aes_p8_xts_encrypt(walk.src.virt.addr,
						   walk.dst.virt.addr,
						   nbytes & AES_BLOCK_MASK,
						   &ctx->enc_key, NULL, tweak);
			else
				aes_p8_xts_decrypt(walk.src.virt.addr,
						   walk.dst.virt.addr,
						   nbytes & AES_BLOCK_MASK,
						   &ctx->dec_key, NULL, tweak);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}

		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	}
	return ret;
}
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}
static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}
static int derived_key_decrypt(struct encrypted_key_payload *epayload,
			       const u8 *derived_key,
			       unsigned int derived_keylen)
{
	struct scatterlist sg_in[1];
	struct scatterlist sg_out[2];
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	unsigned int encrypted_datalen;
	u8 iv[AES_BLOCK_SIZE];
	char pad[16];
	int ret;

	/* the encrypted payload is padded out to the cipher block size */
	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
	req = init_skcipher_req(derived_key, derived_keylen);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;
	dump_encrypted_data(epayload, encrypted_datalen);

	memset(pad, 0, sizeof pad);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
	sg_set_buf(&sg_out[0], epayload->decrypted_data,
		   epayload->decrypted_datalen);
	/* the second output entry absorbs the decrypted padding bytes */
	sg_set_buf(&sg_out[1], pad, sizeof pad);

	memcpy(iv, epayload->iv, sizeof(iv));
	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
	ret = crypto_skcipher_decrypt(req);
	tfm = crypto_skcipher_reqtfm(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	if (ret < 0)
		goto out;

	dump_decrypted_data(epayload);
out:
	return ret;
}
/**
 * fname_decrypt() - decrypt a filename
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_decrypt(struct inode *inode,
			 const struct fscrypt_str *iname,
			 struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist src_sg, dst_sg;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	union fscrypt_iv iv;
	int res;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	/* Initialize IV */
	fscrypt_generate_iv(&iv, 0, ci);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
	res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	skcipher_request_free(req);
	if (res < 0) {
		fscrypt_err(inode->i_sb,
			    "Filename decryption failed for inode %lu: %d",
			    inode->i_ino, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return 0;
}