static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len;

	memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
	memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
	if (__xts_paes_set_key(ctx)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * xts_check_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
	return xts_check_key(tfm, ckey, 2 * ckey_len);
}
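For reference, the xts_check_key() call above performs exactly the two checks the comment describes. A minimal standalone sketch, assuming it mirrors the historical helper in include/crypto/xts.h (the real helper uses the constant-time crypto_memneq() rather than memcmp(), and only rejects identical halves when fips_enabled is set):

#include <string.h>

/* Sketch of the xts_check_key() logic: an XTS key is two equal-size
 * keys concatenated, so the length must be even, and (in FIPS mode)
 * the data key and the tweak key must differ. */
static int xts_check_key_sketch(const unsigned char *key,
				unsigned int keylen, int fips_enabled)
{
	if (keylen % 2)
		return -1;	/* cannot split into two equal halves */

	if (fips_enabled &&
	    memcmp(key, key + keylen / 2, keylen / 2) == 0)
		return -1;	/* data key == tweak key: weak XTS key */

	return 0;
}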
static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = xts_check_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	if (!ret)
		return 0;

	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}
static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	ret = xts_check_key(tfm, key, keylen);
	if (ret)
		return ret;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key + keylen / 2, (keylen / 2) * 8,
				     &ctx->tweak_key);
	ret += aes_p8_set_encrypt_key(key, (keylen / 2) * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, (keylen / 2) * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
	return ret;
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

struct cast6_xts_ctx {
	struct cast6_ctx tweak_ctx;
	struct cast6_ctx crypt_ctx;
};

static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
			      flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__cast6_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__cast6_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static struct crypto_alg cast6_algs[10] = { {
	.cra_name		= "__ecb-cast6-avx",
	.cra_driver_name	= "__driver-ecb-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-cast6-avx",
	.cra_driver_name	= "__driver-cbc-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-cast6-avx",
	.cra_driver_name	= "__driver-ctr-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.ivsize		= CAST6_BLOCK_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-cast6-avx",
	.cra_driver_name	= "__driver-lrw-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE +
					  CAST6_BLOCK_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE +
					  CAST6_BLOCK_SIZE,
			.ivsize		= CAST6_BLOCK_SIZE,
			.setkey		= lrw_cast6_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-cast6-avx",
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
				  GLUE_FUNC_CAST(aesni_enc));
}
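The glue helper above encrypts each block sandwiched between two XORs with the tweak, and advances the tweak once per block (the kernel does this with its gf128mul "x_ble" helper). A minimal sketch of that per-block tweak update, assuming the standard XTS little-endian convention; the function name here is hypothetical:

/* Multiply a 16-byte XTS tweak by x (alpha) in GF(2^128).
 * The value is shifted left by one bit with byte 0 holding the least
 * significant bits; a carry out of the top bit folds back in as 0x87. */
static void xts_mul_x_sketch(unsigned char t[16])
{
	unsigned int carry = 0;
	int i;

	for (i = 0; i < 16; i++) {
		unsigned int msb = t[i] >> 7;

		t[i] = (unsigned char)((t[i] << 1) | carry);
		carry = msb;
	}
	if (carry)
		t[0] ^= 0x87;	/* reduction: x^128 = x^7 + x^2 + x + 1 */
}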
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = encrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = decrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

void lrw_twofish_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm);

int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
				flags);
}
EXPORT_SYMBOL_GPL(xts_twofish_setkey);

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = encrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = decrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static struct crypto_alg tf_algs[5] = { {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_twofish_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "xts(twofish)",
static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES_EXPKEY_WORDS];
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (cc_is_hw_key(tfm)) {
		/* setting HW key slots */
		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;

		if (ctx_p->flow_mode != S_DIN_to_AES) {
			dev_err(dev, "HW key not supported for non-AES flows\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki->hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki->hw_key1 == hki->hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki->hw_key1, hki->hw_key2);
				return -EINVAL;
			}
			ctx_p->hw.key2_slot =
				hw_key_to_cc_hw_key(hki->hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki->hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->keylen = keylen;
		dev_dbg(dev, "cc_is_hw_key ret 0");

		return 0;
	}

	/* verify weak keys */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (!des_ekey(tmp, key) &&
		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}
	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}
	if (ctx_p->flow_mode == S_DIN_to_DES &&
	    keylen == DES3_EDE_KEY_SIZE && cc_verify_3des_keys(key, keylen)) {
		dev_dbg(dev, "weak 3DES key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     skcipher_alg.base);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx_p->hw_key = false;

	/*
	 * Verify DES weak keys
	 * Note that we're dropping the expanded key since the
	 * HW does the expansion on its own.
	 */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (keylen == DES3_EDE_KEY_SIZE &&
		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
				      DES3_EDE_KEY_SIZE)) {
			dev_dbg(dev, "weak 3DES key");
			return -EINVAL;
		} else if (!des_ekey(tmp, key) &&
			   (crypto_tfm_get_flags(tfm) &
			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
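The ESSIV branch at the end of both cc_cipher_setkey() variants derives the second key half by hashing the first: key2 = SHA-256(key1). A hedged userspace sketch of the same derivation, using OpenSSL's one-shot SHA256() in place of the kernel shash API; it assumes keylen / 2 equals 32 bytes so the digest exactly fills the second half, as in the AES-256 ESSIV case:

#include <openssl/sha.h>

/* Overwrite the second half of an ESSIV key buffer with the SHA-256
 * digest of the first half, mirroring the crypto_shash_digest() call
 * in the driver above. */
static void essiv_derive_key2(unsigned char *key, unsigned int keylen)
{
	SHA256(key, keylen / 2, key + keylen / 2);
}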