static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl, u8 *key) { int err = -EINVAL; struct crypto_ablkcipher *cipher; if (!ctl->cipher_keysize) goto err_out_exit; cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0); if (IS_ERR(cipher)) { err = PTR_ERR(cipher); dprintk("%s: failed to allocate cipher '%s', err: %d.\n", __func__, ctl->cipher_algo, err); goto err_out_exit; } crypto_ablkcipher_clear_flags(cipher, ~0); err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize); if (err) { dprintk("%s: failed to set key for cipher '%s', err: %d.\n", __func__, ctl->cipher_algo, err); goto err_out_free; } return cipher; err_out_free: crypto_free_ablkcipher(cipher); err_out_exit: return ERR_PTR(err); }
static struct crypto_ablkcipher *pohmelfs_init_cipher(struct pohmelfs_sb *psb) { int err = -EINVAL; struct crypto_ablkcipher *cipher; if (!psb->cipher_keysize) goto err_out_exit; cipher = crypto_alloc_ablkcipher(psb->cipher_string, 0, 0); if (IS_ERR(cipher)) { err = PTR_ERR(cipher); dprintk("%s: idx: %u: failed to allocate cipher '%s', err: %d.\n", __func__, psb->idx, psb->cipher_string, err); goto err_out_exit; } crypto_ablkcipher_clear_flags(cipher, ~0); err = crypto_ablkcipher_setkey(cipher, psb->cipher_key, psb->cipher_keysize); if (err) { dprintk("%s: idx: %u: failed to set key for cipher '%s', err: %d.\n", __func__, psb->idx, psb->cipher_string, err); goto err_out_free; } return cipher; err_out_free: crypto_free_ablkcipher(cipher); err_out_exit: return ERR_PTR(err); }
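/*
 * Note: both helpers above return an ERR_PTR()-encoded error rather than NULL
 * on failure. A hedged usage sketch; the calling context and field values are
 * illustrative, not taken from the real callers:
 */
static int example_setup_dst_cipher(struct dst_crypto_ctl *ctl, u8 *key)
{
	struct crypto_ablkcipher *cipher;

	/* ctl->cipher_algo (e.g. "cbc(aes)") and ctl->cipher_keysize are
	 * assumed to have been filled in by the caller. */
	cipher = dst_init_cipher(ctl, key);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	/* ... use the transform ... */
	crypto_free_ablkcipher(cipher);
	return 0;
}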
static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { struct crypto_ablkcipher *ctr_tfm; struct ablkcipher_request *req; int ret = -EINVAL; struct aesni_hash_subkey_req_data *req_data; ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); if (IS_ERR(ctr_tfm)) return PTR_ERR(ctr_tfm); crypto_ablkcipher_clear_flags(ctr_tfm, ~0); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); if (ret) goto out_free_ablkcipher; ret = -ENOMEM; req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); if (!req) goto out_free_ablkcipher; req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) goto out_free_request; memset(req_data->iv, 0, sizeof(req_data->iv)); /* Clear the data in the hash sub key container to zero.*/ /* We want to cipher all zeros to create the hash sub key. */ memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); init_completion(&req_data->result.completion); sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); ablkcipher_request_set_tfm(req, ctr_tfm); ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, rfc4106_set_hash_subkey_done, &req_data->result); ablkcipher_request_set_crypt(req, &req_data->sg, &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); ret = crypto_ablkcipher_encrypt(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible (&req_data->result.completion); if (!ret) ret = req_data->result.err; } kfree(req_data); out_free_request: ablkcipher_request_free(req); out_free_ablkcipher: crypto_free_ablkcipher(ctr_tfm); return ret; }
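/*
 * rfc4106_set_hash_subkey() above relies on a small result structure and a
 * completion callback that are not part of this snippet (the callback itself
 * appears verbatim in a later snippet of this section). A minimal sketch,
 * assuming the layout implied by the code above:
 */
struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req,
					 int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	/* -EINPROGRESS only means the request was queued; wait for the
	 * final completion before recording a status. */
	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}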
static int sahara_aes_cra_init(struct crypto_tfm *tfm) { const char *name = tfm->__crt_alg->cra_name; struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); ctx->fallback = crypto_alloc_ablkcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx); return 0; }
static int qce_ablkcipher_init(struct crypto_tfm *tfm) { struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); memset(ctx, 0, sizeof(*ctx)); tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), CRYPTO_ALG_TYPE_ABLKCIPHER, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) return PTR_ERR(ctx->fallback); return 0; }
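/*
 * The CRYPTO_ALG_NEED_FALLBACK transforms allocated by the init routines
 * above are typically used when the hardware cannot process a particular
 * request (unsupported key size, mode, etc.). A hedged sketch of that
 * pattern; the function name is illustrative and only ctx->fallback is
 * taken from the code above:
 */
static int example_fallback_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int err;

	/* Temporarily point the request at the software fallback, run it,
	 * then restore the original transform. */
	ablkcipher_request_set_tfm(req, ctx->fallback);
	err = crypto_ablkcipher_encrypt(req);
	ablkcipher_request_set_tfm(req, tfm);
	return err;
}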
/** * f2fs_derive_key_aes() - Derive a key using AES-128-ECB * @deriving_key: Encryption key used for derivation. * @source_key: Source key to which to apply derivation. * @derived_key: Derived key. * * Return: Zero on success; non-zero otherwise. */ static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE], char source_key[F2FS_AES_256_XTS_KEY_SIZE], char derived_key[F2FS_AES_256_XTS_KEY_SIZE]) { int res = 0; struct ablkcipher_request *req = NULL; DECLARE_F2FS_COMPLETION_RESULT(ecr); struct scatterlist src_sg, dst_sg; struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm)) { res = PTR_ERR(tfm); tfm = NULL; goto out; } crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); req = ablkcipher_request_alloc(tfm, GFP_NOFS); if (!req) { res = -ENOMEM; goto out; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, derive_crypt_complete, &ecr); res = crypto_ablkcipher_setkey(tfm, deriving_key, F2FS_AES_128_ECB_KEY_SIZE); if (res < 0) goto out; sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE); sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE); ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, F2FS_AES_256_XTS_KEY_SIZE, NULL); res = crypto_ablkcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { BUG_ON(req->base.data != &ecr); wait_for_completion(&ecr.completion); res = ecr.res; } out: if (req) ablkcipher_request_free(req); if (tfm) crypto_free_ablkcipher(tfm); return res; }
static int dcp_cra_init(struct crypto_tfm *tfm) { const char *name = tfm->__crt_alg->cra_name; struct dcp_op *ctx = crypto_tfm_ctx(tfm); tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx); ctx->fallback = crypto_alloc_ablkcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { dev_err(global_dev->dev, "Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); } return 0; }
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } #endif #ifdef CONFIG_X86_64 static int rfc4106_init(struct crypto_aead *aead) { struct cryptd_aead *cryptd_tfm; struct cryptd_aead **ctx = crypto_aead_ctx(aead); cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); *ctx = cryptd_tfm; crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); return 0; } static void rfc4106_exit(struct crypto_aead *aead) { struct cryptd_aead **ctx = crypto_aead_ctx(aead); cryptd_free_aead(*ctx); } static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) { struct aesni_gcm_set_hash_subkey_result *result = req->data; if (err == -EINPROGRESS) return; result->err = err; complete(&result->completion); } static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { struct crypto_ablkcipher *ctr_tfm; struct ablkcipher_request *req; int ret = -EINVAL; struct aesni_hash_subkey_req_data *req_data; ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); if (IS_ERR(ctr_tfm)) return PTR_ERR(ctr_tfm); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); if (ret) goto out_free_ablkcipher; ret = -ENOMEM; req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); if (!req) goto out_free_ablkcipher; req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) goto out_free_request; memset(req_data->iv, 0, sizeof(req_data->iv)); /* Clear the data in the hash sub key container to zero.*/ /* We want to cipher all zeros to create the hash sub key. 
*/ memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); init_completion(&req_data->result.completion); sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); ablkcipher_request_set_tfm(req, ctr_tfm); ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, rfc4106_set_hash_subkey_done, &req_data->result); ablkcipher_request_set_crypt(req, &req_data->sg, &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); ret = crypto_ablkcipher_encrypt(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible (&req_data->result.completion); if (!ret) ret = req_data->result.err; } kfree(req_data); out_free_request: ablkcipher_request_free(req); out_free_ablkcipher: crypto_free_ablkcipher(ctr_tfm); return ret; } static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, unsigned int key_len) { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); if (key_len < 4) { crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /*Account for 4 byte nonce at the end.*/ key_len -= 4; memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); return aes_set_key_common(crypto_aead_tfm(aead), &ctx->aes_key_expanded, key, key_len) ?: rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, unsigned int key_len) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; return crypto_aead_setkey(&cryptd_tfm->base, key, key_len); } static int common_rfc4106_set_authsize(struct crypto_aead *aead, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return 0; } /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. 
*/ static int rfc4106_set_authsize(struct crypto_aead *parent, unsigned int authsize) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); } static int helper_rfc4106_encrypt(struct aead_request *req) { u8 one_entry_in_sg = 0; u8 *src, *dst, *assoc; __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); unsigned long auth_tag_len = crypto_aead_authsize(tfm); u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); struct scatter_walk src_sg_walk; struct scatter_walk dst_sg_walk; unsigned int i; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length equal */ /* to 16 or 20 bytes */ if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* IV below built */ for (i = 0; i < 4; i++) *(iv+i) = ctx->nonce[i]; for (i = 0; i < 8; i++) *(iv+4+i) = req->iv[i]; *((__be32 *)(iv+12)) = counter; if (sg_is_last(req->src) && req->src->offset + req->src->length <= PAGE_SIZE && sg_is_last(req->dst) && req->dst->offset + req->dst->length <= PAGE_SIZE) { one_entry_in_sg = 1; scatterwalk_start(&src_sg_walk, req->src); assoc = scatterwalk_map(&src_sg_walk); src = assoc + req->assoclen; dst = src; if (unlikely(req->src != req->dst)) { scatterwalk_start(&dst_sg_walk, req->dst); dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; } } else { /* Allocate memory for src, dst, assoc */ assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, GFP_ATOMIC); if (unlikely(!assoc)) return -ENOMEM; scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen + req->cryptlen, 0); src = assoc + req->assoclen; dst = src; } kernel_fpu_begin(); aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, ctx->hash_subkey, assoc, req->assoclen - 8, dst + req->cryptlen, auth_tag_len); kernel_fpu_end(); /* The authTag (aka the Integrity Check Value) needs to be written * back to the packet. 
*/ if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst - req->assoclen); scatterwalk_advance(&dst_sg_walk, req->dst->length); scatterwalk_done(&dst_sg_walk, 1, 0); } scatterwalk_unmap(assoc); scatterwalk_advance(&src_sg_walk, req->src->length); scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); } else { scatterwalk_map_and_copy(dst, req->dst, req->assoclen, req->cryptlen + auth_tag_len, 1); kfree(assoc); } return 0; } static int helper_rfc4106_decrypt(struct aead_request *req) { u8 one_entry_in_sg = 0; u8 *src, *dst, *assoc; unsigned long tempCipherLen = 0; __be32 counter = cpu_to_be32(1); int retval = 0; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); unsigned long auth_tag_len = crypto_aead_authsize(tfm); u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); u8 authTag[16]; struct scatter_walk src_sg_walk; struct scatter_walk dst_sg_walk; unsigned int i; if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length */ /* equal to 16 or 20 bytes */ tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); /* IV below built */ for (i = 0; i < 4; i++) *(iv+i) = ctx->nonce[i]; for (i = 0; i < 8; i++) *(iv+4+i) = req->iv[i]; *((__be32 *)(iv+12)) = counter; if (sg_is_last(req->src) && req->src->offset + req->src->length <= PAGE_SIZE && sg_is_last(req->dst) && req->dst->offset + req->dst->length <= PAGE_SIZE) { one_entry_in_sg = 1; scatterwalk_start(&src_sg_walk, req->src); assoc = scatterwalk_map(&src_sg_walk); src = assoc + req->assoclen; dst = src; if (unlikely(req->src != req->dst)) { scatterwalk_start(&dst_sg_walk, req->dst); dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; } } else { /* Allocate memory for src, dst, assoc */ assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!assoc) return -ENOMEM; scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen + req->cryptlen, 0); src = assoc + req->assoclen; dst = src; } kernel_fpu_begin(); aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, ctx->hash_subkey, assoc, req->assoclen - 8, authTag, auth_tag_len); kernel_fpu_end(); /* Compare generated tag with passed in tag. */ retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ? -EBADMSG : 0; if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst - req->assoclen); scatterwalk_advance(&dst_sg_walk, req->dst->length); scatterwalk_done(&dst_sg_walk, 1, 0); } scatterwalk_unmap(assoc); scatterwalk_advance(&src_sg_walk, req->src->length); scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); } else { scatterwalk_map_and_copy(dst, req->dst, req->assoclen, tempCipherLen, 1); kfree(assoc); } return retval; }
/* * Cipher algorithm self tests */ int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d) { int rc = 0, err, tv_index, num_tv; struct crypto_ablkcipher *tfm; struct ablkcipher_request *ablkcipher_req; struct _fips_completion fips_completion; char *k_align_src = NULL; struct scatterlist fips_sg; struct _fips_test_vector_cipher tv_cipher; num_tv = (sizeof(fips_test_vector_cipher)) / (sizeof(struct _fips_test_vector_cipher)); /* One-by-one testing */ for (tv_index = 0; tv_index < num_tv; tv_index++) { memcpy(&tv_cipher, &fips_test_vector_cipher[tv_index], (sizeof(struct _fips_test_vector_cipher))); /* Single buffer allocation for in place operation */ k_align_src = kzalloc(tv_cipher.pln_txt_len, GFP_KERNEL); if (k_align_src == NULL) { pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n", PTR_ERR(k_align_src)); return -ENOMEM; } memcpy(&k_align_src[0], tv_cipher.pln_txt, tv_cipher.pln_txt_len); /* use_sw flags are set in dtsi file which makes default Linux API calls to go to s/w crypto instead of h/w crypto. This code makes sure that all selftests calls always go to h/w, independent of DTSI flags. */ if (!strcmp(tv_cipher.mod_alg, "xts(aes)")) { if (selftest_d->prefix_aes_xts_algo) if (_fips_get_alg_cra_name( tv_cipher.mod_alg, selftest_d->algo_prefix, strlen(tv_cipher.mod_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } else { if (selftest_d->prefix_aes_cbc_ecb_ctr_algo) if (_fips_get_alg_cra_name( tv_cipher.mod_alg, selftest_d->algo_prefix, strlen(tv_cipher.mod_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } tfm = crypto_alloc_ablkcipher(tv_cipher.mod_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("qcrypto: %s algorithm not found\n", tv_cipher.mod_alg); rc = -ENOMEM; goto clr_buf; } ablkcipher_req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!ablkcipher_req) { pr_err("qcrypto: ablkcipher_request_alloc failed\n"); rc = -ENOMEM; goto clr_tfm; } rc = qcrypto_cipher_set_device(ablkcipher_req, selftest_d->ce_device); if (rc != 0) { pr_err("%s qcrypto_cipher_set_device failed with err %d\n", __func__, rc); goto clr_ablkcipher_req; } ablkcipher_request_set_callback(ablkcipher_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); crypto_ablkcipher_clear_flags(tfm, ~0); rc = crypto_ablkcipher_setkey(tfm, tv_cipher.key, tv_cipher.klen); if (rc) { pr_err("qcrypto: crypto_ablkcipher_setkey failed\n"); goto clr_ablkcipher_req; } sg_set_buf(&fips_sg, k_align_src, tv_cipher.enc_txt_len); sg_mark_end(&fips_sg); ablkcipher_request_set_crypt(ablkcipher_req, &fips_sg, &fips_sg, tv_cipher.pln_txt_len, tv_cipher.iv); /**** Encryption Test ****/ init_completion(&fips_completion.completion); rc = crypto_ablkcipher_encrypt(ablkcipher_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:cipher:ENC, wait_for_completion failed\n"); goto clr_ablkcipher_req; } } if (memcmp(k_align_src, tv_cipher.enc_txt, tv_cipher.enc_txt_len)) { rc = -1; goto clr_ablkcipher_req; } /**** Decryption test ****/ init_completion(&fips_completion.completion); rc = crypto_ablkcipher_decrypt(ablkcipher_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:cipher:DEC, wait_for_completion failed\n"); goto clr_ablkcipher_req; } } if (memcmp(k_align_src, tv_cipher.pln_txt, tv_cipher.pln_txt_len)) rc = -1; clr_ablkcipher_req: ablkcipher_request_free(ablkcipher_req); clr_tfm: crypto_free_ablkcipher(tfm); clr_buf: kzfree(k_align_src); if (rc) return rc; } return rc; }
int _f2fs_get_encryption_info(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_crypt_info *crypt_info; char full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE + (F2FS_KEY_DESCRIPTOR_SIZE * 2) + 1]; struct key *keyring_key = NULL; struct f2fs_encryption_key *master_key; struct f2fs_encryption_context ctx; const struct user_key_payload *ukp; struct crypto_ablkcipher *ctfm; const char *cipher_str; char raw_key[F2FS_MAX_KEY_SIZE]; char mode; int res; res = f2fs_crypto_initialize(); if (res) return res; retry: crypt_info = ACCESS_ONCE(fi->i_crypt_info); if (crypt_info) { if (!crypt_info->ci_keyring_key || key_validate(crypt_info->ci_keyring_key) == 0) return 0; f2fs_free_encryption_info(inode, crypt_info); goto retry; } res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx), NULL); if (res < 0) return res; else if (res != sizeof(ctx)) return -EINVAL; res = 0; crypt_info = kmem_cache_alloc(f2fs_crypt_info_cachep, GFP_NOFS); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; crypt_info->ci_keyring_key = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); if (S_ISREG(inode->i_mode)) mode = crypt_info->ci_data_mode; else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) mode = crypt_info->ci_filename_mode; else BUG(); switch (mode) { case F2FS_ENCRYPTION_MODE_AES_256_XTS: cipher_str = "xts(aes)"; break; case F2FS_ENCRYPTION_MODE_AES_256_CTS: cipher_str = "cts(cbc(aes))"; break; default: printk_once(KERN_WARNING "f2fs: unsupported key mode %d (ino %u)\n", mode, (unsigned) inode->i_ino); res = -ENOKEY; goto out; } memcpy(full_key_descriptor, F2FS_KEY_DESC_PREFIX, F2FS_KEY_DESC_PREFIX_SIZE); sprintf(full_key_descriptor + F2FS_KEY_DESC_PREFIX_SIZE, "%*phN", F2FS_KEY_DESCRIPTOR_SIZE, ctx.master_key_descriptor); full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE + (2 * F2FS_KEY_DESCRIPTOR_SIZE)] = '\0'; keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); if (IS_ERR(keyring_key)) { res = PTR_ERR(keyring_key); keyring_key = NULL; goto out; } crypt_info->ci_keyring_key = keyring_key; BUG_ON(keyring_key->type != &key_type_logon); ukp = user_key_payload(keyring_key); if (ukp->datalen != sizeof(struct f2fs_encryption_key)) { res = -EINVAL; goto out; } master_key = (struct f2fs_encryption_key *)ukp->data; BUILD_BUG_ON(F2FS_AES_128_ECB_KEY_SIZE != F2FS_KEY_DERIVATION_NONCE_SIZE); BUG_ON(master_key->size != F2FS_AES_256_XTS_KEY_SIZE); res = f2fs_derive_key_aes(ctx.nonce, master_key->raw, raw_key); if (res) goto out; ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; printk(KERN_DEBUG "%s: error %d (inode %u) allocating crypto tfm\n", __func__, res, (unsigned) inode->i_ino); goto out; } crypt_info->ci_ctfm = ctfm; crypto_ablkcipher_clear_flags(ctfm, ~0); crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm), CRYPTO_TFM_REQ_WEAK_KEY); res = crypto_ablkcipher_setkey(ctfm, raw_key, f2fs_encryption_key_size(mode)); if (res) goto out; memzero_explicit(raw_key, sizeof(raw_key)); if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) != NULL) { f2fs_free_crypt_info(crypt_info); goto retry; } return 0; out: if (res == -ENOKEY && !S_ISREG(inode->i_mode)) res = 0; f2fs_free_crypt_info(crypt_info); memzero_explicit(raw_key, sizeof(raw_key)); return res; }
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } #endif #ifdef CONFIG_X86_64 static int rfc4106_init(struct crypto_tfm *tfm) { struct cryptd_aead *cryptd_tfm; struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); struct crypto_aead *cryptd_child; struct aesni_rfc4106_gcm_ctx *child_ctx; cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); cryptd_child = cryptd_aead_child(cryptd_tfm); child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); memcpy(child_ctx, ctx, sizeof(*ctx)); ctx->cryptd_tfm = cryptd_tfm; tfm->crt_aead.reqsize = sizeof(struct aead_request) + crypto_aead_reqsize(&cryptd_tfm->base); return 0; } static void rfc4106_exit(struct crypto_tfm *tfm) { struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); if (!IS_ERR(ctx->cryptd_tfm)) cryptd_free_aead(ctx->cryptd_tfm); return; } static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) { struct aesni_gcm_set_hash_subkey_result *result = req->data; if (err == -EINPROGRESS) return; result->err = err; complete(&result->completion); } static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { struct crypto_ablkcipher *ctr_tfm; struct ablkcipher_request *req; int ret = -EINVAL; struct aesni_hash_subkey_req_data *req_data; ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); if (IS_ERR(ctr_tfm)) return PTR_ERR(ctr_tfm); crypto_ablkcipher_clear_flags(ctr_tfm, ~0); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); if (ret) goto out_free_ablkcipher; ret = -ENOMEM; req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); if (!req) goto out_free_ablkcipher; req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) goto out_free_request; memset(req_data->iv, 0, sizeof(req_data->iv)); /* Clear the data in the hash sub key container to zero.*/ /* We want to cipher all zeros to create the hash sub key. 
*/ memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); init_completion(&req_data->result.completion); sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); ablkcipher_request_set_tfm(req, ctr_tfm); ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, rfc4106_set_hash_subkey_done, &req_data->result); ablkcipher_request_set_crypt(req, &req_data->sg, &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); ret = crypto_ablkcipher_encrypt(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible (&req_data->result.completion); if (!ret) ret = req_data->result.err; } kfree(req_data); out_free_request: ablkcipher_request_free(req); out_free_ablkcipher: crypto_free_ablkcipher(ctr_tfm); return ret; } static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, unsigned int key_len) { int ret = 0; struct crypto_tfm *tfm = crypto_aead_tfm(parent); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); struct aesni_rfc4106_gcm_ctx *child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); u8 *new_key_align, *new_key_mem = NULL; if (key_len < 4) { crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /*Account for 4 byte nonce at the end.*/ key_len -= 4; if (key_len != AES_KEYSIZE_128) { crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); /*This must be on a 16 byte boundary!*/ if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) return -EINVAL; if ((unsigned long)key % AESNI_ALIGN) { /*key is not aligned: use an auxuliar aligned pointer*/ new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL); if (!new_key_mem) return -ENOMEM; new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN); memcpy(new_key_align, key, key_len); key = new_key_align; } if (!irq_fpu_usable()) ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), key, key_len); else { kernel_fpu_begin(); ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); kernel_fpu_end(); } /*This must be on a 16 byte boundary!*/ if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { ret = -EINVAL; goto exit; } ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); memcpy(child_ctx, ctx, sizeof(*ctx)); exit: kfree(new_key_mem); return ret; } /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. */ static int rfc4106_set_authsize(struct crypto_aead *parent, unsigned int authsize) { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } crypto_aead_crt(parent)->authsize = authsize; crypto_aead_crt(cryptd_child)->authsize = authsize; return 0; } static int rfc4106_encrypt(struct aead_request *req) { int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = (struct aead_request *) aead_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_encrypt(cryptd_req); } else { struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.encrypt(req); kernel_fpu_end(); return ret; } }
static int tegra_crypto_dev_open(struct inode *inode, struct file *filp) { struct tegra_crypto_ctx *ctx; int ret = 0; ctx = kzalloc(sizeof(struct tegra_crypto_ctx), GFP_KERNEL); if (!ctx) { pr_err("no memory for context\n"); return -ENOMEM; } ctx->ecb_tfm = crypto_alloc_ablkcipher("ecb-aes-tegra", CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0); if (IS_ERR(ctx->ecb_tfm)) { pr_err("Failed to load transform for ecb-aes-tegra: %ld\n", PTR_ERR(ctx->ecb_tfm)); ret = PTR_ERR(ctx->ecb_tfm); goto fail_ecb; } ctx->cbc_tfm = crypto_alloc_ablkcipher("cbc-aes-tegra", CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0); if (IS_ERR(ctx->cbc_tfm)) { pr_err("Failed to load transform for cbc-aes-tegra: %ld\n", PTR_ERR(ctx->cbc_tfm)); ret = PTR_ERR(ctx->cbc_tfm); goto fail_cbc; } if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) { ctx->ofb_tfm = crypto_alloc_ablkcipher("ofb-aes-tegra", CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0); if (IS_ERR(ctx->ofb_tfm)) { pr_err("Failed to load transform for ofb-aes-tegra: %ld\n", PTR_ERR(ctx->ofb_tfm)); ret = PTR_ERR(ctx->ofb_tfm); goto fail_ofb; } ctx->ctr_tfm = crypto_alloc_ablkcipher("ctr-aes-tegra", CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0); if (IS_ERR(ctx->ctr_tfm)) { pr_err("Failed to load transform for ctr-aes-tegra: %ld\n", PTR_ERR(ctx->ctr_tfm)); ret = PTR_ERR(ctx->ctr_tfm); goto fail_ctr; } } if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 && tegra_get_chipid() != TEGRA_CHIPID_TEGRA3) { ctx->rng_drbg = crypto_alloc_rng("rng_drbg-aes-tegra", CRYPTO_ALG_TYPE_RNG, 0); if (IS_ERR(ctx->rng_drbg)) { pr_err("Failed to load transform for rng_drbg tegra: %ld\n", PTR_ERR(ctx->rng_drbg)); ret = PTR_ERR(ctx->rng_drbg); goto fail_rng; } } else { ctx->rng = crypto_alloc_rng("rng-aes-tegra", CRYPTO_ALG_TYPE_RNG, 0); if (IS_ERR(ctx->rng)) { pr_err("Failed to load transform for tegra rng: %ld\n", PTR_ERR(ctx->rng)); ret = PTR_ERR(ctx->rng); goto fail_rng; } } ctx->rsa512_tfm = crypto_alloc_ahash("tegra-se-rsa512", CRYPTO_ALG_TYPE_AHASH, 0); if (IS_ERR(ctx->rsa512_tfm)) { pr_err("Failed to load transform for rsa512: %ld\n", PTR_ERR(ctx->rsa512_tfm)); ret = PTR_ERR(ctx->rsa512_tfm); goto fail_rsa512; } ctx->rsa1024_tfm = crypto_alloc_ahash("tegra-se-rsa1024", CRYPTO_ALG_TYPE_AHASH, 0); if (IS_ERR(ctx->rsa1024_tfm)) { pr_err("Failed to load transform for rsa1024: %ld\n", PTR_ERR(ctx->rsa1024_tfm)); ret = PTR_ERR(ctx->rsa1024_tfm); goto fail_rsa1024; } ctx->rsa1536_tfm = crypto_alloc_ahash("tegra-se-rsa1536", CRYPTO_ALG_TYPE_AHASH, 0); if (IS_ERR(ctx->rsa1536_tfm)) { pr_err("Failed to load transform for rsa1536: %ld\n", PTR_ERR(ctx->rsa1536_tfm)); ret = PTR_ERR(ctx->rsa1536_tfm); goto fail_rsa1536; } ctx->rsa2048_tfm = crypto_alloc_ahash("tegra-se-rsa2048", CRYPTO_ALG_TYPE_AHASH, 0); if (IS_ERR(ctx->rsa2048_tfm)) { pr_err("Failed to load transform for rsa2048: %ld\n", PTR_ERR(ctx->rsa2048_tfm)); ret = PTR_ERR(ctx->rsa2048_tfm); goto fail_rsa2048; } filp->private_data = ctx; return ret; fail_rsa2048: crypto_free_ahash(ctx->rsa1536_tfm); fail_rsa1536: crypto_free_ahash(ctx->rsa1024_tfm); fail_rsa1024: crypto_free_ahash(ctx->rsa512_tfm); fail_rsa512: if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 && tegra_get_chipid() != TEGRA_CHIPID_TEGRA3) crypto_free_rng(ctx->rng_drbg); else crypto_free_rng(ctx->rng); fail_rng: if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) crypto_free_ablkcipher(ctx->ctr_tfm); fail_ctr: if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) crypto_free_ablkcipher(ctx->ofb_tfm); fail_ofb: crypto_free_ablkcipher(ctx->cbc_tfm); fail_cbc: crypto_free_ablkcipher(ctx->ecb_tfm); fail_ecb: kfree(ctx); return ret; }
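/*
 * The matching release path is not shown above. A hedged sketch that simply
 * mirrors the allocations made in tegra_crypto_dev_open(); the function name
 * is illustrative:
 */
static int example_tegra_crypto_dev_release(struct inode *inode,
					    struct file *filp)
{
	struct tegra_crypto_ctx *ctx = filp->private_data;

	crypto_free_ahash(ctx->rsa2048_tfm);
	crypto_free_ahash(ctx->rsa1536_tfm);
	crypto_free_ahash(ctx->rsa1024_tfm);
	crypto_free_ahash(ctx->rsa512_tfm);

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
	    tegra_get_chipid() != TEGRA_CHIPID_TEGRA3)
		crypto_free_rng(ctx->rng_drbg);
	else
		crypto_free_rng(ctx->rng);

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
		crypto_free_ablkcipher(ctx->ctr_tfm);
		crypto_free_ablkcipher(ctx->ofb_tfm);
	}
	crypto_free_ablkcipher(ctx->cbc_tfm);
	crypto_free_ablkcipher(ctx->ecb_tfm);

	kfree(ctx);
	filp->private_data = NULL;
	return 0;
}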
static int ctr_aes_init(struct ctr_drbg_ctx_s *ctx) { int status = 0; ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0); if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) { pr_info("%s: qcom-ecb(aes) failed", __func__); ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0); pr_info("ctx->aes_ctx.tfm = %p\n", ctx->aes_ctx.tfm); if (IS_ERR(ctx->aes_ctx.tfm) || (NULL == ctx->aes_ctx.tfm)) { pr_err("%s: qcom-ecb(aes) failed\n", __func__); status = -E_FAILURE; goto out; } } ctx->aes_ctx.req = ablkcipher_request_alloc(ctx->aes_ctx.tfm, GFP_KERNEL); if (IS_ERR(ctx->aes_ctx.req) || (NULL == ctx->aes_ctx.req)) { pr_info("%s: Failed to allocate request.\n", __func__); status = -E_FAILURE; goto clr_tfm; } ablkcipher_request_set_callback(ctx->aes_ctx.req, CRYPTO_TFM_REQ_MAY_BACKLOG, _crypto_cipher_test_complete, &ctx->aes_ctx.result); memset(&ctx->aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s)); memset(&ctx->aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s)); /* Allocate memory. */ ctx->aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE, GFP_KERNEL | __GFP_DMA); if (NULL == ctx->aes_ctx.input.virt_addr) { pr_debug("%s: Failed to input memory.\n", __func__); status = -E_FAILURE; goto clr_req; } ctx->aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE, GFP_KERNEL | __GFP_DMA); if (NULL == ctx->aes_ctx.output.virt_addr) { pr_debug("%s: Failed to output memory.\n", __func__); status = -E_FAILURE; goto clr_input; } /*-------------------------------------------------------------------- Set DF AES mode ----------------------------------------------------------------------*/ ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0); if ((NULL == ctx->df_aes_ctx.tfm) || IS_ERR(ctx->df_aes_ctx.tfm)) { pr_info("%s: qcom-ecb(aes) failed", __func__); ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0); if (IS_ERR(ctx->df_aes_ctx.tfm) || (NULL == ctx->df_aes_ctx.tfm)) { pr_err("%s: ecb(aes) failed", __func__); status = -E_FAILURE; goto clr_output; } } ctx->df_aes_ctx.req = ablkcipher_request_alloc(ctx->df_aes_ctx.tfm, GFP_KERNEL); if (IS_ERR(ctx->df_aes_ctx.req) || (NULL == ctx->df_aes_ctx.req)) { pr_debug(": Failed to allocate request.\n"); status = -E_FAILURE; goto clr_df_tfm; } ablkcipher_request_set_callback(ctx->df_aes_ctx.req, CRYPTO_TFM_REQ_MAY_BACKLOG, _crypto_cipher_test_complete, &ctx->df_aes_ctx.result); memset(&ctx->df_aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s)); memset(&ctx->df_aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s)); ctx->df_aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE, GFP_KERNEL | __GFP_DMA); if (NULL == ctx->df_aes_ctx.input.virt_addr) { pr_debug(": Failed to input memory.\n"); status = -E_FAILURE; goto clr_df_req; } ctx->df_aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE, GFP_KERNEL | __GFP_DMA); if (NULL == ctx->df_aes_ctx.output.virt_addr) { pr_debug(": Failed to output memory.\n"); status = -E_FAILURE; goto clr_df_input; } goto out; clr_df_input: if (ctx->df_aes_ctx.input.virt_addr) { kzfree(ctx->df_aes_ctx.input.virt_addr); ctx->df_aes_ctx.input.virt_addr = NULL; } clr_df_req: if (ctx->df_aes_ctx.req) { ablkcipher_request_free(ctx->df_aes_ctx.req); ctx->df_aes_ctx.req = NULL; } clr_df_tfm: if (ctx->df_aes_ctx.tfm) { crypto_free_ablkcipher(ctx->df_aes_ctx.tfm); ctx->df_aes_ctx.tfm = NULL; } clr_output: if (ctx->aes_ctx.output.virt_addr) { kzfree(ctx->aes_ctx.output.virt_addr); ctx->aes_ctx.output.virt_addr = NULL; } clr_input: if (ctx->aes_ctx.input.virt_addr) { kzfree(ctx->aes_ctx.input.virt_addr); 
ctx->aes_ctx.input.virt_addr = NULL; } clr_req: if (ctx->aes_ctx.req) { ablkcipher_request_free(ctx->aes_ctx.req); ctx->aes_ctx.req = NULL; } clr_tfm: if (ctx->aes_ctx.tfm) { crypto_free_ablkcipher(ctx->aes_ctx.tfm); ctx->aes_ctx.tfm = NULL; } out: return status; }
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[8]; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), .tweak_fn = aesni_xts_tweak, .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), .crypt_fn = lrw_xts_decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); ret = xts_crypt(desc, dst, src, nbytes, &req); kernel_fpu_end(); return ret; } #endif #ifdef CONFIG_X86_64 static int rfc4106_init(struct crypto_tfm *tfm) { struct cryptd_aead *cryptd_tfm; struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); struct crypto_aead *cryptd_child; struct aesni_rfc4106_gcm_ctx *child_ctx; cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); cryptd_child = cryptd_aead_child(cryptd_tfm); child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); memcpy(child_ctx, ctx, sizeof(*ctx)); ctx->cryptd_tfm = cryptd_tfm; tfm->crt_aead.reqsize = sizeof(struct aead_request) + crypto_aead_reqsize(&cryptd_tfm->base); return 0; } static void rfc4106_exit(struct crypto_tfm *tfm) { struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); if (!IS_ERR(ctx->cryptd_tfm)) cryptd_free_aead(ctx->cryptd_tfm); return; } static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) { struct aesni_gcm_set_hash_subkey_result *result = req->data; if (err == -EINPROGRESS) return; result->err = err; complete(&result->completion); } static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { struct crypto_ablkcipher *ctr_tfm; struct ablkcipher_request *req; int ret = -EINVAL; struct aesni_hash_subkey_req_data *req_data; ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); if (IS_ERR(ctr_tfm)) return PTR_ERR(ctr_tfm); crypto_ablkcipher_clear_flags(ctr_tfm, ~0); ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); if (ret) goto out_free_ablkcipher; ret = -ENOMEM; req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL); if (!req) goto out_free_ablkcipher; req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) goto out_free_request; memset(req_data->iv, 0, sizeof(req_data->iv)); /* Clear the data in the hash sub key container to zero.*/ /* We want to cipher all zeros to create the hash sub key. 
*/ memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); init_completion(&req_data->result.completion); sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); ablkcipher_request_set_tfm(req, ctr_tfm); ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, rfc4106_set_hash_subkey_done, &req_data->result); ablkcipher_request_set_crypt(req, &req_data->sg, &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv); ret = crypto_ablkcipher_encrypt(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible (&req_data->result.completion); if (!ret) ret = req_data->result.err; } kfree(req_data); out_free_request: ablkcipher_request_free(req); out_free_ablkcipher: crypto_free_ablkcipher(ctr_tfm); return ret; } static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, unsigned int key_len) { int ret = 0; struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); u8 *new_key_align, *new_key_mem = NULL; if (key_len < 4) { crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } /*Account for 4 byte nonce at the end.*/ key_len -= 4; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && key_len != AES_KEYSIZE_256) { crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); /*This must be on a 16 byte boundary!*/ if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) return -EINVAL; if ((unsigned long)key % AESNI_ALIGN) { /*key is not aligned: use an auxuliar aligned pointer*/ new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL); if (!new_key_mem) return -ENOMEM; new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN); memcpy(new_key_align, key, key_len); key = new_key_align; } if (!irq_fpu_usable()) ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), key, key_len); else { kernel_fpu_begin(); ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); kernel_fpu_end(); } /*This must be on a 16 byte boundary!*/ if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { ret = -EINVAL; goto exit; } ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); exit: kfree(new_key_mem); return ret; } static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, unsigned int key_len) { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child); struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm; int ret; ret = crypto_aead_setkey(child, key, key_len); if (!ret) { memcpy(ctx, c_ctx, sizeof(*ctx)); ctx->cryptd_tfm = cryptd_tfm; } return ret; } static int common_rfc4106_set_authsize(struct crypto_aead *aead, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } crypto_aead_crt(aead)->authsize = authsize; return 0; } /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. 
*/ static int rfc4106_set_authsize(struct crypto_aead *parent, unsigned int authsize) { struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); int ret; ret = crypto_aead_setauthsize(child, authsize); if (!ret) crypto_aead_crt(parent)->authsize = authsize; return ret; } static int __driver_rfc4106_encrypt(struct aead_request *req) { u8 one_entry_in_sg = 0; u8 *src, *dst, *assoc; __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); u32 key_len = ctx->aes_key_expanded.key_length; void *aes_ctx = &(ctx->aes_key_expanded); unsigned long auth_tag_len = crypto_aead_authsize(tfm); u8 iv_tab[16+AESNI_ALIGN]; u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN); struct scatter_walk src_sg_walk; struct scatter_walk assoc_sg_walk; struct scatter_walk dst_sg_walk; unsigned int i; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length equal */ /* to 8 or 12 bytes */ if (unlikely(req->assoclen != 8 && req->assoclen != 12)) return -EINVAL; if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) return -EINVAL; if (unlikely(key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && key_len != AES_KEYSIZE_256)) return -EINVAL; /* IV below built */ for (i = 0; i < 4; i++) *(iv+i) = ctx->nonce[i]; for (i = 0; i < 8; i++) *(iv+4+i) = req->iv[i]; *((__be32 *)(iv+12)) = counter; if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { one_entry_in_sg = 1; scatterwalk_start(&src_sg_walk, req->src); scatterwalk_start(&assoc_sg_walk, req->assoc); src = scatterwalk_map(&src_sg_walk); assoc = scatterwalk_map(&assoc_sg_walk); dst = src; if (unlikely(req->src != req->dst)) { scatterwalk_start(&dst_sg_walk, req->dst); dst = scatterwalk_map(&dst_sg_walk); } } else { /* Allocate memory for src, dst, assoc */ src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, GFP_ATOMIC); if (unlikely(!src)) return -ENOMEM; assoc = (src + req->cryptlen + auth_tag_len); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); dst = src; } aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst + ((unsigned long)req->cryptlen), auth_tag_len); /* The authTag (aka the Integrity Check Value) needs to be written * back to the packet. */ if (one_entry_in_sg) { if (unlikely(req->src != req->dst)) { scatterwalk_unmap(dst); scatterwalk_done(&dst_sg_walk, 0, 0); } scatterwalk_unmap(src); scatterwalk_unmap(assoc); scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen + auth_tag_len, 1); kfree(src); } return 0; }
VOS_STATUS vos_decrypt_AES(v_U32_t cryptHandle, /* Handle */ v_U8_t *pText, /* pointer to data stream */ v_U8_t *pDecrypted, v_U8_t *pKey) /* pointer to authentication key */ { // VOS_STATUS uResult = VOS_STATUS_E_FAILURE; struct ecb_aes_result result; struct ablkcipher_request *req; struct crypto_ablkcipher *tfm; int ret = 0; char iv[IV_SIZE_AES_128]; struct scatterlist sg_in; struct scatterlist sg_out; init_completion(&result.completion); #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) tfm = crypto_alloc_ablkcipher( "cbc(aes)", 0, 0); #else tfm = wcnss_wlan_crypto_alloc_ablkcipher( "cbc(aes)", 0, 0); #endif if (IS_ERR(tfm)) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ablkcipher failed"); ret = PTR_ERR(tfm); goto err_tfm; } req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "Failed to allocate request for cbc(aes)"); ret = -ENOMEM; goto err_req; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ecb_aes_complete, &result); crypto_ablkcipher_clear_flags(tfm, ~0); ret = crypto_ablkcipher_setkey(tfm, pKey, AES_KEYSIZE_128); if (ret) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_cipher_setkey failed"); goto err_setkey; } memset(iv, 0, IV_SIZE_AES_128); sg_init_one(&sg_in, pText, AES_BLOCK_SIZE); sg_init_one(&sg_out, pDecrypted, AES_BLOCK_SIZE); ablkcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv); crypto_ablkcipher_decrypt(req); // ------------------------------------- err_setkey: #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) ablkcipher_request_free(req); #else wcnss_wlan_ablkcipher_request_free(req); #endif err_req: #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) crypto_free_ablkcipher(tfm); #else wcnss_wlan_crypto_free_ablkcipher(tfm); #endif err_tfm: //return ret; if (ret != 0) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,"%s() call failed", __func__); return VOS_STATUS_E_FAULT; } return VOS_STATUS_SUCCESS; }
static int test_acipher(const char *algo, int enc, char *data_in, char *data_out, size_t data_len, char *key, int keysize) { struct crypto_ablkcipher *tfm; struct tcrypt_result tresult; struct ablkcipher_request *req; struct scatterlist sg[TVMEMSIZE]; unsigned int ret, i, j, iv_len; char iv[128]; ret = -EAGAIN; init_completion(&tresult.completion); tfm = crypto_alloc_ablkcipher(algo, 0, 0); if (IS_ERR(tfm)) { printk(KERN_ERR "failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return ret; } req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { printk(KERN_ERR "tcrypt: skcipher: Failed to allocate request for %s\n", algo); goto out; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tcrypt_complete, &tresult); crypto_ablkcipher_clear_flags(tfm, ~0); ret = crypto_ablkcipher_setkey(tfm, key, keysize); if (ret) { printk(KERN_ERR "setkey() failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); goto out_free_req; } printk(KERN_INFO "KEY:\n"); hexdump(key, keysize); sg_init_table(sg, TVMEMSIZE); i = 0; j = data_len; while (j > PAGE_SIZE) { sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE); i++; j -= PAGE_SIZE; } sg_set_buf(sg + i, tvmem[i], j); memcpy(tvmem[i], data_in + i * PAGE_SIZE, j); iv_len = crypto_ablkcipher_ivsize(tfm); memcpy(iv, iv16, iv_len); printk(KERN_INFO "IV:\n"); hexdump(iv, iv_len); ablkcipher_request_set_crypt(req, sg, sg, data_len, iv); printk(KERN_INFO "IN:\n"); hexdump(data_in, data_len); if (enc) ret = do_one_acipher_op(req, crypto_ablkcipher_encrypt(req)); else ret = do_one_acipher_op(req, crypto_ablkcipher_decrypt(req)); if (ret) printk(KERN_ERR "failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); else { i = 0; j = data_len; while (j > PAGE_SIZE) { memcpy(data_out + i * PAGE_SIZE, tvmem[i], PAGE_SIZE); i++; j -= PAGE_SIZE; } memcpy(data_out + i * PAGE_SIZE, tvmem[i], j); printk(KERN_INFO "OUT:\n"); hexdump(data_out, data_len); } out_free_req: ablkcipher_request_free(req); out: crypto_free_ablkcipher(tfm); return ret; }
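/*
 * test_acipher() above depends on a result structure, a completion callback
 * and a synchronous-wait helper that are not part of this snippet. A minimal
 * sketch of what they typically look like in tcrypt-style test code,
 * assuming the usual completion/err pair:
 */
struct tcrypt_result {
	struct completion completion;
	int err;
};

static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct tcrypt_result *tr = req->base.data;

		/* The request was queued; wait for the callback and pick up
		 * the final status it recorded. */
		ret = wait_for_completion_interruptible(&tr->completion);
		if (!ret)
			ret = tr->err;
	}
	return ret;
}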
int _ext4_get_encryption_info(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_crypt_info *crypt_info; char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1]; struct key *keyring_key = NULL; struct ext4_encryption_key *master_key; struct ext4_encryption_context ctx; struct user_key_payload *ukp; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct crypto_ablkcipher *ctfm; const char *cipher_str; char raw_key[EXT4_MAX_KEY_SIZE]; char mode; int res; if (!ext4_read_workqueue) { res = ext4_init_crypto(); if (res) return res; } retry: crypt_info = ACCESS_ONCE(ei->i_crypt_info); if (crypt_info) { if (!crypt_info->ci_keyring_key || key_validate(crypt_info->ci_keyring_key) == 0) return 0; ext4_free_encryption_info(inode, crypt_info); goto retry; } res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx)); if (res < 0) { if (!DUMMY_ENCRYPTION_ENABLED(sbi)) return res; ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS; ctx.flags = 0; } else if (res != sizeof(ctx)) return -EINVAL; res = 0; crypt_info = kmem_cache_alloc(ext4_crypt_info_cachep, GFP_KERNEL); if (!crypt_info) return -ENOMEM; crypt_info->ci_flags = ctx.flags; crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; crypt_info->ci_ctfm = NULL; crypt_info->ci_keyring_key = NULL; memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); if (S_ISREG(inode->i_mode)) mode = crypt_info->ci_data_mode; else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) mode = crypt_info->ci_filename_mode; else BUG(); switch (mode) { case EXT4_ENCRYPTION_MODE_AES_256_XTS: cipher_str = "xts(aes)"; break; case EXT4_ENCRYPTION_MODE_AES_256_CTS: cipher_str = "cts(cbc(aes))"; break; default: printk_once(KERN_WARNING "ext4: unsupported key mode %d (ino %u)\n", mode, (unsigned) inode->i_ino); res = -ENOKEY; goto out; } if (DUMMY_ENCRYPTION_ENABLED(sbi)) { memset(raw_key, 0x42, EXT4_AES_256_XTS_KEY_SIZE); goto got_key; } memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX, EXT4_KEY_DESC_PREFIX_SIZE); sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE, "%*phN", EXT4_KEY_DESCRIPTOR_SIZE, ctx.master_key_descriptor); full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0'; keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); if (IS_ERR(keyring_key)) { res = PTR_ERR(keyring_key); keyring_key = NULL; goto out; } crypt_info->ci_keyring_key = keyring_key; if (keyring_key->type != &key_type_logon) { printk_once(KERN_WARNING "ext4: key type must be logon\n"); res = -ENOKEY; goto out; } ukp = ((struct user_key_payload *)keyring_key->payload.data); if (ukp->datalen != sizeof(struct ext4_encryption_key)) { res = -EINVAL; goto out; } master_key = (struct ext4_encryption_key *)ukp->data; BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE != EXT4_KEY_DERIVATION_NONCE_SIZE); if (master_key->size != EXT4_AES_256_XTS_KEY_SIZE) { printk_once(KERN_WARNING "ext4: key size incorrect: %d\n", master_key->size); res = -ENOKEY; goto out; } res = ext4_derive_key_aes(ctx.nonce, master_key->raw, raw_key); got_key: ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { res = ctfm ? 
PTR_ERR(ctfm) : -ENOMEM; printk(KERN_DEBUG "%s: error %d (inode %u) allocating crypto tfm\n", __func__, res, (unsigned) inode->i_ino); goto out; } crypt_info->ci_ctfm = ctfm; crypto_ablkcipher_clear_flags(ctfm, ~0); crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm), CRYPTO_TFM_REQ_WEAK_KEY); res = crypto_ablkcipher_setkey(ctfm, raw_key, ext4_encryption_key_size(mode)); if (res) goto out; memzero_explicit(raw_key, sizeof(raw_key)); if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) != NULL) { ext4_free_crypt_info(crypt_info); goto retry; } return 0; out: if (res == -ENOKEY) res = 0; ext4_free_crypt_info(crypt_info); memzero_explicit(raw_key, sizeof(raw_key)); return res; }
void mmc_decrypt_req(struct mmc_host *host, struct mmc_request *mrq) { struct crypto_ablkcipher *tfm; struct ablkcipher_request *req; struct mmc_tcrypt_result result; struct scatterlist *in_sg = mrq->data->sg; int rc = 0; u8 IV[MMC_AES_XTS_IV_LEN]; sector_t sector = mrq->data->sector; tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0); if (IS_ERR(tfm)) { pr_err("%s:%s ablkcipher tfm allocation failed : error = %lu\n", mmc_hostname(host), __func__, PTR_ERR(tfm)); return; } req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("%s:%s ablkcipher request allocation failed\n", mmc_hostname(host), __func__); goto ablkcipher_req_alloc_failure; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, mmc_crypto_cipher_complete, &result); init_completion(&result.completion); qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); crypto_ablkcipher_clear_flags(tfm, ~0); crypto_ablkcipher_setkey(tfm, NULL, MMC_KEY_SIZE_XTS); memset(IV, 0, MMC_AES_XTS_IV_LEN); memcpy(IV, &sector, sizeof(sector_t)); ablkcipher_request_set_crypt(req, in_sg, in_sg, mrq->data->blksz * mrq->data->blocks, (void *) IV); rc = crypto_ablkcipher_decrypt(req); switch (rc) { case 0: break; case -EBUSY: /* * Lets make this synchronous request by waiting on * in progress as well */ case -EINPROGRESS: wait_for_completion_interruptible(&result.completion); if (result.err) pr_err("%s:%s error = %d decrypting the request\n", mmc_hostname(host), __func__, result.err); break; default: goto crypto_operation_failure; } crypto_operation_failure: ablkcipher_request_free(req); ablkcipher_req_alloc_failure: crypto_free_ablkcipher(tfm); return; }
void mmc_encrypt_req(struct mmc_host *host, struct mmc_request *mrq) { struct crypto_ablkcipher *tfm; struct ablkcipher_request *req; struct mmc_tcrypt_result result; struct scatterlist *in_sg = mrq->data->sg; struct scatterlist *out_sg = NULL; u8 *dst_data = NULL; unsigned long data_len = 0; uint32_t bytes = 0; int rc = 0; u8 IV[MMC_AES_XTS_IV_LEN]; sector_t sector = mrq->data->sector; tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0); if (IS_ERR(tfm)) { pr_err("%s:%s ablkcipher tfm allocation failed : error = %lu\n", mmc_hostname(host), __func__, PTR_ERR(tfm)); return; } req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("%s:%s ablkcipher request allocation failed\n", mmc_hostname(host), __func__); goto ablkcipher_req_alloc_failure; } ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, mmc_crypto_cipher_complete, &result); init_completion(&result.completion); qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); crypto_ablkcipher_clear_flags(tfm, ~0); crypto_ablkcipher_setkey(tfm, NULL, MMC_KEY_SIZE_XTS); data_len = mrq->data->blksz * mrq->data->blocks; if (data_len > MMC_512_KB) { pr_err("%s:%s Encryption operation aborted: req size > 512K\n", mmc_hostname(host), __func__); goto crypto_operation_failure; } if (mmc_crypto_buf_idx != MAX_ENCRYPTION_BUFFERS) { dst_data = mmc_crypto_bufs[mmc_crypto_buf_idx]; out_sg = mmc_crypto_out_sg[mmc_crypto_buf_idx]; mmc_crypto_buf_idx = 1-mmc_crypto_buf_idx; } else { pr_err("%s:%s encryption buffers not available\n", mmc_hostname(host), __func__); goto crypto_operation_failure; } bytes = sg_copy_to_buffer(in_sg, mrq->data->sg_len, dst_data, data_len); if (bytes != data_len) { pr_err("%s:%s error in copying data from sglist to buffer\n", mmc_hostname(host), __func__); goto crypto_operation_failure; } if (!mmc_copy_sglist(in_sg, mrq->data->sg_len, out_sg, dst_data)) { pr_err("%s:%s could not create dst sglist from in sglist\n", mmc_hostname(host), __func__); goto crypto_operation_failure; } memset(IV, 0, MMC_AES_XTS_IV_LEN); memcpy(IV, &sector, sizeof(sector_t)); ablkcipher_request_set_crypt(req, in_sg, out_sg, data_len, (void *) IV); rc = crypto_ablkcipher_encrypt(req); switch (rc) { case 0: break; case -EBUSY: /* * Lets make this synchronous request by waiting on * in progress as well */ case -EINPROGRESS: wait_for_completion_interruptible(&result.completion); if (result.err) pr_err("%s:%s error = %d encrypting the request\n", mmc_hostname(host), __func__, result.err); break; default: goto crypto_operation_failure; } mrq->data->sg = out_sg; mrq->data->sg_len = mmc_count_sg(out_sg, data_len); crypto_operation_failure: ablkcipher_request_free(req); ablkcipher_req_alloc_failure: crypto_free_ablkcipher(tfm); return; }
/*
 * Returns a negative errno on setup/crypto errors, otherwise the number of
 * failed comparisons for this test case.
 */
static int run_test_case(const struct eme2_test_case *c, unsigned int number)
{
	int err = 0;
	int failed = 0;
	struct crypto_ablkcipher *cipher = NULL;
	struct ablkcipher_request *req = NULL;
	u8 *buffer = NULL;
	struct scatterlist sg[1];

	buffer = kmalloc(c->plaintext_len, GFP_KERNEL);
	if (!buffer) {
		printk("eme2_test: ERROR allocating buffer!\n");
		err = -ENOMEM;
		goto out;
	}
	sg_init_one(sg, buffer, c->plaintext_len);

	cipher = crypto_alloc_ablkcipher("eme2(aes)", 0, 0);
	if (IS_ERR(cipher)) {
		printk("eme2_test: ERROR allocating cipher!\n");
		err = PTR_ERR(cipher);
		cipher = NULL;
		goto out;
	}

	err = crypto_ablkcipher_setkey(cipher, c->key, c->key_len);
	if (err) {
		printk("eme2_test: ERROR setting key!\n");
		goto out;
	}

	/* ablkcipher_request_alloc() returns NULL on failure, not ERR_PTR. */
	req = ablkcipher_request_alloc(cipher, GFP_KERNEL);
	if (!req) {
		printk("eme2_test: ERROR allocating request!\n");
		err = -ENOMEM;
		goto out;
	}

	ablkcipher_request_set_crypt(req, sg, sg, c->plaintext_len,
				     (u8 *)c->assoc_data);

	memcpy(buffer, c->plaintext, c->plaintext_len);
	err = eme2_encrypt_sync(req, c->assoc_data_len);
	if (err) {
		printk("eme2_test: ERROR encrypting!\n");
		goto out;
	}
	if (memcmp(buffer, c->ciphertext, c->plaintext_len) != 0) {
		failed += 1;
		printk("eme2_test: encryption-%u: Testcase failed!\n", number);
	}

	memcpy(buffer, c->ciphertext, c->plaintext_len);
	err = eme2_decrypt_sync(req, c->assoc_data_len);
	if (err) {
		printk("eme2_test: ERROR decrypting!\n");
		goto out;
	}
	if (memcmp(buffer, c->plaintext, c->plaintext_len) != 0) {
		failed += 1;
		printk("eme2_test: decryption-%u: Testcase failed!\n", number);
	}
out:
	if (req)
		ablkcipher_request_free(req);
	if (cipher)
		crypto_free_ablkcipher(cipher);
	kfree(buffer);
	return err < 0 ? err : failed;
}
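/*
 * Hypothetical layout of the test vector structure used by run_test_case()
 * above, reconstructed only from the fields the test accesses; the real
 * definition may differ.
 */
struct eme2_test_case {
	const u8 *key;
	unsigned int key_len;

	const u8 *assoc_data;		/* EME2 tweak / associated data */
	unsigned int assoc_data_len;

	const u8 *plaintext;
	const u8 *ciphertext;
	unsigned int plaintext_len;	/* ciphertext has the same length */
};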
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			if (alg->max_keysize > 0 &&
			    unlikely((keylen < alg->min_keysize) ||
				     (keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
				       keylen, alg_name, alg->min_keysize,
				       alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.", alg_name, keylen * 8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		ablkcipher_request_set_callback(out->async.request,
				CRYPTO_TFM_REQ_MAY_BACKLOG,
				cryptodev_complete, out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
				CRYPTO_TFM_REQ_MAY_BACKLOG,
				cryptodev_complete, out->async.result);
	}

	out->init = 1;
	return 0;

error:
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);

	return ret;
}
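/*
 * Sketch of the matching teardown for cryptodev_cipher_init() above,
 * mirroring its error path.  This is an illustrative reconstruction under
 * the assumption of a cryptodev_cipher_deinit-style helper, not necessarily
 * the exact cryptodev-linux implementation.
 */
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	if (!cdata->init)
		return;

	if (cdata->aead == 0) {
		if (cdata->async.request)
			ablkcipher_request_free(cdata->async.request);
		if (cdata->async.s)
			crypto_free_ablkcipher(cdata->async.s);
	} else {
		if (cdata->async.arequest)
			aead_request_free(cdata->async.arequest);
		if (cdata->async.as)
			crypto_free_aead(cdata->async.as);
	}

	kfree(cdata->async.result);
	cdata->init = 0;
}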
struct crypto_ablkcipher *
wcnss_wlan_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(alg_name, type, mask);
}
int get_crypt_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
				(FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
	struct key *keyring_key = NULL;
	struct fscrypt_key *master_key;
	struct fscrypt_context ctx;
	struct user_key_payload *ukp;
	struct crypto_ablkcipher *ctfm;
	const char *cipher_str;
	u8 raw_key[FS_MAX_KEY_SIZE];
	u8 mode;
	int res;

	res = fscrypt_initialize();
	if (res)
		return res;

	if (!inode->i_sb->s_cop->get_context)
		return -EOPNOTSUPP;
retry:
	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
		    key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		fscrypt_put_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode))
			return res;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}
	res = 0;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
	       sizeof(crypt_info->ci_master_key));
	if (S_ISREG(inode->i_mode))
		mode = crypt_info->ci_data_mode;
	else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		mode = crypt_info->ci_filename_mode;
	else
		BUG();

	switch (mode) {
	case FS_ENCRYPTION_MODE_AES_256_XTS:
		cipher_str = "xts(aes)";
		break;
	case FS_ENCRYPTION_MODE_AES_256_CTS:
		cipher_str = "cts(cbc(aes))";
		break;
	default:
		printk_once(KERN_WARNING
			    "%s: unsupported key mode %d (ino %u)\n",
			    __func__, mode, (unsigned) inode->i_ino);
		res = -ENOKEY;
		goto out;
	}

	if (fscrypt_dummy_context_enabled(inode)) {
		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
		goto got_key;
	}

	memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
	sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE, "%*phN",
		FS_KEY_DESCRIPTOR_SIZE, ctx.master_key_descriptor);
	full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
			    (2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0';

	keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
	if (IS_ERR(keyring_key)) {
		res = PTR_ERR(keyring_key);
		keyring_key = NULL;
		goto out;
	}
	crypt_info->ci_keyring_key = keyring_key;
	if (keyring_key->type != &key_type_logon) {
		printk_once(KERN_WARNING "%s: key type must be logon\n", __func__);
		res = -ENOKEY;
		goto out;
	}

	down_read(&keyring_key->sem);
	ukp = ((struct user_key_payload *)keyring_key->payload.data);
	if (ukp->datalen != sizeof(struct fscrypt_key)) {
		res = -EINVAL;
		up_read(&keyring_key->sem);
		goto out;
	}
	master_key = (struct fscrypt_key *)ukp->data;
	BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

	if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
		printk_once(KERN_WARNING "%s: key size incorrect: %d\n",
			    __func__, master_key->size);
		res = -ENOKEY;
		up_read(&keyring_key->sem);
		goto out;
	}
	res = derive_key_aes(ctx.nonce, master_key->raw, raw_key);
	up_read(&keyring_key->sem);
	if (res)
		goto out;
got_key:
	ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_ablkcipher_clear_flags(ctfm, ~0);
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm), CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_ablkcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
	if (res)
		goto out;

	memzero_explicit(raw_key, sizeof(raw_key));
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
		put_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	memzero_explicit(raw_key, sizeof(raw_key));
	return res;
}
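/*
 * Illustrative sketch only: once get_crypt_info() above has stored the
 * "xts(aes)" transform in ci_ctfm, a consumer would drive it per block with
 * the same request/completion pattern used elsewhere in this file.  The
 * helper names, the result struct, and the tweak layout (logical block
 * number in the low bytes of a 16-byte IV) are assumptions for illustration,
 * not taken from the fscrypt source.
 */
struct fscrypt_crypt_result_example {
	struct completion completion;
	int err;
};

static void fscrypt_crypt_done_example(struct crypto_async_request *req, int err)
{
	struct fscrypt_crypt_result_example *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int fscrypt_encrypt_block_example(struct fscrypt_info *ci, u64 lblk_num,
					 struct page *src, struct page *dst)
{
	struct fscrypt_crypt_result_example ecr = { .err = 0 };
	struct scatterlist src_sg, dst_sg;
	struct ablkcipher_request *req;
	u8 iv[16] = { 0 };
	int res;

	/* Assumed tweak layout: logical block number in the low IV bytes. */
	memcpy(iv, &lblk_num, sizeof(lblk_num));
	init_completion(&ecr.completion);

	req = ablkcipher_request_alloc(ci->ci_ctfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			fscrypt_crypt_done_example, &ecr);

	sg_init_table(&src_sg, 1);
	sg_set_page(&src_sg, src, PAGE_SIZE, 0);
	sg_init_table(&dst_sg, 1);
	sg_set_page(&dst_sg, dst, PAGE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, PAGE_SIZE, iv);

	res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		/* Wait for the async completion and pick up its status. */
		wait_for_completion(&ecr.completion);
		res = ecr.err;
	}

	ablkcipher_request_free(req);
	return res;
}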