static struct skcipher_request *init_skcipher_req(const u8 *key,
						  unsigned int key_len)
{
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to load %s transform (%ld)\n",
		       blkcipher_alg, PTR_ERR(tfm));
		return ERR_CAST(tfm);
	}

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret < 0) {
		pr_err("encrypted_key: failed to setkey (%d)\n", ret);
		crypto_free_skcipher(tfm);
		return ERR_PTR(ret);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("encrypted_key: failed to allocate request for %s\n",
		       blkcipher_alg);
		crypto_free_skcipher(tfm);
		return ERR_PTR(-ENOMEM);
	}

	skcipher_request_set_callback(req, 0, NULL, NULL);
	return req;
}
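/*
 * Hypothetical caller sketch (not from the source): drive one synchronous
 * encryption with the request prepared by init_skcipher_req() and tear
 * everything down afterwards.  The buffer, length, and IV parameters are
 * illustrative assumptions; the data must not live on the stack, since it
 * is mapped into a scatterlist.
 */
static int example_encrypt_with_req(const u8 *key, unsigned int key_len,
				    u8 *data, unsigned int len, u8 *iv)
{
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int ret;

	req = init_skcipher_req(key, key_len);
	if (IS_ERR(req))
		return PTR_ERR(req);
	tfm = crypto_skcipher_reqtfm(req);

	sg_init_one(&sg, data, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	ret = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);	/* tfm was allocated by init_skcipher_req() */
	return ret;
}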
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	/* the nonce is stored in bytes at end of key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}
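/*
 * A hedged illustration (not from the source): the key blob that
 * crypto_rfc3686_setkey() expects is the underlying CTR key followed by
 * the 4-byte RFC 3686 nonce, handed to setkey() as a single buffer.  The
 * helper name and the AES-128 key size are assumptions for the example.
 */
static int example_rfc3686_setkey(struct crypto_skcipher *tfm,
				  const u8 aes_key[16],
				  const u8 nonce[CTR_RFC3686_NONCE_SIZE])
{
	u8 blob[16 + CTR_RFC3686_NONCE_SIZE];

	memcpy(blob, aes_key, 16);			  /* Key1 for ctr(aes) */
	memcpy(blob + 16, nonce, CTR_RFC3686_NONCE_SIZE); /* trailing nonce */
	return crypto_skcipher_setkey(tfm, blob, sizeof(blob));
}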
/*
 * Encrypt/decrypt big_key data
 */
static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
{
	int ret = -EINVAL;
	struct scatterlist sgio;
	SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);

	if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
		ret = -EAGAIN;
		goto error;
	}

	skcipher_request_set_tfm(req, big_key_skcipher);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);

	sg_init_one(&sgio, data, datalen);
	skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);

	if (op == BIG_KEY_ENC)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

error:
	return ret;
}
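/*
 * Hypothetical usage sketch (not from the source): round-trip a heap
 * buffer in place with a caller-supplied ENC_KEY_SIZE key.  How the key
 * bytes are generated, and whether datalen is valid for the cipher behind
 * big_key_skcipher, is left to the caller; both are assumptions here.
 */
static int example_big_key_roundtrip(u8 *payload, size_t payload_len,
				     u8 *enckey /* ENC_KEY_SIZE bytes */)
{
	int ret;

	ret = big_key_crypt(BIG_KEY_ENC, payload, payload_len, enckey);
	if (ret)
		return ret;

	/* ... ciphertext would normally be stored here ... */

	return big_key_crypt(BIG_KEY_DEC, payload, payload_len, enckey);
}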
/*
 * WUSB Pseudo Random Function (WUSB1.0[6.5])
 *
 * @b: buffer to the source data; cannot be a global or const local
 *     (will confuse the scatterlists)
 */
ssize_t wusb_prf(void *out, size_t out_size,
		 const u8 key[16], const struct aes_ccm_nonce *_n,
		 const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	struct aes_ccm_nonce n = *_n;
	struct crypto_skcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_skcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;

error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_skcipher(tfm_cbc);
error_alloc_cbc:
	return result;
}
static inline const void *
get_key(const void *p, const void *end,
	struct krb5_ctx *ctx, struct crypto_skcipher **res)
{
	struct xdr_netobj	key;
	int			alg;

	p = simple_get_bytes(p, end, &alg, sizeof(alg));
	if (IS_ERR(p))
		goto out_err;

	switch (alg) {
	case ENCTYPE_DES_CBC_CRC:
	case ENCTYPE_DES_CBC_MD4:
	case ENCTYPE_DES_CBC_MD5:
		/* Map all these key types to ENCTYPE_DES_CBC_RAW */
		alg = ENCTYPE_DES_CBC_RAW;
		break;
	}

	if (!supported_gss_krb5_enctype(alg)) {
		printk(KERN_WARNING "gss_kerberos_mech: unsupported "
			"encryption key algorithm %d\n", alg);
		p = ERR_PTR(-EINVAL);
		goto out_err;
	}
	p = simple_get_netobj(p, end, &key);
	if (IS_ERR(p))
		goto out_err;

	*res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(*res)) {
		printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		*res = NULL;
		goto out_err_free_key;
	}
	if (crypto_skcipher_setkey(*res, key.data, key.len)) {
		printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
			"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
		goto out_err_free_tfm;
	}

	kfree(key.data);
	return p;

out_err_free_tfm:
	crypto_free_skcipher(*res);
out_err_free_key:
	kfree(key.data);
	p = ERR_PTR(-EINVAL);
out_err:
	return p;
}
static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, key_len);
	crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}
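/*
 * A hedged sketch of the async-completion pattern do_encrypt_iv() relies
 * on.  The struct layout and callback mirror what the driver is expected
 * to provide as omap_aes_gcm_result / omap_aes_gcm_complete, but the
 * names used here are illustrative, not the driver's actual code.
 */
struct example_gcm_result {
	struct completion completion;
	int err;
};

static void example_gcm_complete(struct crypto_async_request *req, int err)
{
	struct example_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request has only started */

	res->err = err;
	complete(&res->completion);
}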
/**
 * f2fs_derive_key_aes() - Derive a key using AES-128-ECB
 * @deriving_key: Encryption key used for derivation.
 * @source_key:   Source key to which to apply derivation.
 * @derived_key:  Derived key.
 *
 * Return: Zero on success; non-zero otherwise.
 */
static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE],
				char source_key[F2FS_AES_256_XTS_KEY_SIZE],
				char derived_key[F2FS_AES_256_XTS_KEY_SIZE])
{
	int res = 0;
	struct skcipher_request *req = NULL;
	DECLARE_F2FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);

	if (IS_ERR(tfm)) {
		res = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		res = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			derive_crypt_complete, &ecr);
	res = crypto_skcipher_setkey(tfm, deriving_key,
				     F2FS_AES_128_ECB_KEY_SIZE);
	if (res < 0)
		goto out;

	sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE);
	sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg,
				   F2FS_AES_256_XTS_KEY_SIZE, NULL);

	res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return res;
}
static struct crypto_skcipher *
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
{
	struct crypto_skcipher *cp;

	cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(cp)) {
		dprintk("gss_kerberos_mech: unable to initialize "
			"crypto algorithm %s\n", cname);
		return NULL;
	}
	if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
		dprintk("gss_kerberos_mech: error setting key for "
			"crypto algorithm %s\n", cname);
		crypto_free_skcipher(cp);
		return NULL;
	}
	return cp;
}
static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	ret = xts_check_key(tfm, key, keylen);
	if (ret)
		return ret;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8,
				     &ctx->tweak_key);
	ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
	return ret;
}
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
int fscrypt_get_crypt_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	struct fscrypt_context ctx;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	int keysize;
	u8 *raw_key = NULL;
	int res;

	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
	if (res)
		return res;

	if (!inode->i_sb->s_cop->get_context)
		return -EOPNOTSUPP;
retry:
	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
		    key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		fscrypt_put_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode))
			return res;
		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}

	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));

	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
	if (res)
		goto out;

	/*
	 * This cannot be a stack buffer because it is passed to the
	 * scatterlist crypto API as part of key derivation.
	 */
	res = -ENOMEM;
	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
	if (!raw_key)
		goto out;

	if (fscrypt_dummy_context_enabled(inode)) {
		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
		goto got_key;
	}

	res = validate_user_key(crypt_info, &ctx, raw_key,
			FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
	if (res && inode->i_sb->s_cop->key_prefix) {
		u8 *prefix = NULL;
		int prefix_size, res2;

		prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
		res2 = validate_user_key(crypt_info, &ctx, raw_key,
							prefix, prefix_size);
		if (res2) {
			if (res2 == -ENOKEY)
				res = -ENOKEY;
			goto out;
		}
	} else if (res) {
		goto out;
	}
got_key:
	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
	if (res)
		goto out;

	kzfree(raw_key);
	raw_key = NULL;
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
		put_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	kzfree(raw_key);
	return res;
}
int get_crypt_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	struct fscrypt_context ctx;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	u8 raw_key[FS_MAX_KEY_SIZE];
	u8 mode;
	int res;

	res = fscrypt_initialize();
	if (res)
		return res;

	if (!inode->i_sb->s_cop->get_context)
		return -EOPNOTSUPP;
retry:
	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
		    key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		fscrypt_put_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode))
			return res;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		ctx.flags = 0;
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}
	res = 0;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));
	if (S_ISREG(inode->i_mode))
		mode = crypt_info->ci_data_mode;
	else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		mode = crypt_info->ci_filename_mode;
	else
		BUG();

	switch (mode) {
	case FS_ENCRYPTION_MODE_AES_256_XTS:
		cipher_str = "xts(aes)";
		break;
	case FS_ENCRYPTION_MODE_AES_256_CTS:
		cipher_str = "cts(cbc(aes))";
		break;
	default:
		printk_once(KERN_WARNING
			    "%s: unsupported key mode %d (ino %u)\n",
			    __func__, mode, (unsigned) inode->i_ino);
		res = -ENOKEY;
		goto out;
	}
	if (fscrypt_dummy_context_enabled(inode)) {
		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
		goto got_key;
	}

	res = validate_user_key(crypt_info, &ctx, raw_key,
			FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
	if (res && inode->i_sb->s_cop->key_prefix) {
		u8 *prefix = NULL;
		int prefix_size, res2;

		prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
		res2 = validate_user_key(crypt_info, &ctx, raw_key,
							prefix, prefix_size);
		if (res2) {
			if (res2 == -ENOKEY)
				res = -ENOKEY;
			goto out;
		}
	} else if (res) {
		goto out;
	}
got_key:
	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
	if (res)
		goto out;

	memzero_explicit(raw_key, sizeof(raw_key));
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
		put_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	memzero_explicit(raw_key, sizeof(raw_key));
	return res;
}
int _f2fs_get_encryption_info(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_crypt_info *crypt_info;
	char full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
				(F2FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
	struct key *keyring_key = NULL;
	struct f2fs_encryption_key *master_key;
	struct f2fs_encryption_context ctx;
	const struct user_key_payload *ukp;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	char raw_key[F2FS_MAX_KEY_SIZE];
	char mode;
	int res;

	res = f2fs_crypto_initialize();
	if (res)
		return res;
retry:
	crypt_info = ACCESS_ONCE(fi->i_crypt_info);
	if (crypt_info) {
		if (!crypt_info->ci_keyring_key ||
		    key_validate(crypt_info->ci_keyring_key) == 0)
			return 0;
		f2fs_free_encryption_info(inode, crypt_info);
		goto retry;
	}

	res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
			F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
			&ctx, sizeof(ctx), NULL);
	if (res < 0)
		return res;
	else if (res != sizeof(ctx))
		return -EINVAL;
	res = 0;

	crypt_info = kmem_cache_alloc(f2fs_crypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_keyring_key = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));
	if (S_ISREG(inode->i_mode))
		mode = crypt_info->ci_data_mode;
	else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		mode = crypt_info->ci_filename_mode;
	else
		BUG();

	switch (mode) {
	case F2FS_ENCRYPTION_MODE_AES_256_XTS:
		cipher_str = "xts(aes)";
		break;
	case F2FS_ENCRYPTION_MODE_AES_256_CTS:
		cipher_str = "cts(cbc(aes))";
		break;
	default:
		printk_once(KERN_WARNING
			    "f2fs: unsupported key mode %d (ino %u)\n",
			    mode, (unsigned) inode->i_ino);
		res = -ENOKEY;
		goto out;
	}

	memcpy(full_key_descriptor, F2FS_KEY_DESC_PREFIX,
					F2FS_KEY_DESC_PREFIX_SIZE);
	sprintf(full_key_descriptor + F2FS_KEY_DESC_PREFIX_SIZE,
			"%*phN", F2FS_KEY_DESCRIPTOR_SIZE,
			ctx.master_key_descriptor);
	full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
					(2 * F2FS_KEY_DESCRIPTOR_SIZE)] = '\0';
	keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
	if (IS_ERR(keyring_key)) {
		res = PTR_ERR(keyring_key);
		keyring_key = NULL;
		goto out;
	}
	crypt_info->ci_keyring_key = keyring_key;
	BUG_ON(keyring_key->type != &key_type_logon);
	ukp = user_key_payload(keyring_key);
	if (ukp->datalen != sizeof(struct f2fs_encryption_key)) {
		res = -EINVAL;
		goto out;
	}
	master_key = (struct f2fs_encryption_key *)ukp->data;
	BUILD_BUG_ON(F2FS_AES_128_ECB_KEY_SIZE !=
				F2FS_KEY_DERIVATION_NONCE_SIZE);
	BUG_ON(master_key->size != F2FS_AES_256_XTS_KEY_SIZE);
	res = f2fs_derive_key_aes(ctx.nonce, master_key->raw, raw_key);
	if (res)
		goto out;

	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		printk(KERN_DEBUG
		       "%s: error %d (inode %u) allocating crypto tfm\n",
		       __func__, res, (unsigned) inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	res = crypto_skcipher_setkey(ctfm, raw_key,
					f2fs_encryption_key_size(mode));
	if (res)
		goto out;

	memzero_explicit(raw_key, sizeof(raw_key));
	if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) != NULL) {
		f2fs_free_crypt_info(crypt_info);
		goto retry;
	}
	return 0;

out:
	if (res == -ENOKEY && !S_ISREG(inode->i_mode))
		res = 0;
	f2fs_free_crypt_info(crypt_info);
	memzero_explicit(raw_key, sizeof(raw_key));
	return res;
}
int fscrypt_get_encryption_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	struct fscrypt_context ctx;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	int keysize;
	u8 *raw_key = NULL;
	int res;

	if (inode->i_crypt_info)
		return 0;

	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
	if (res)
		return res;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode) ||
		    inode->i_sb->s_cop->is_encrypted(inode))
			return res;
		/* Fake up a context for an unencrypted directory */
		memset(&ctx, 0, sizeof(ctx));
		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}

	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_essiv_tfm = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));

	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
	if (res)
		goto out;

	/*
	 * This cannot be a stack buffer because it is passed to the
	 * scatterlist crypto API as part of key derivation.
	 */
	res = -ENOMEM;
	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
	if (!raw_key)
		goto out;

	res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
				keysize);
	if (res && inode->i_sb->s_cop->key_prefix) {
		int res2 = validate_user_key(crypt_info, &ctx, raw_key,
					     inode->i_sb->s_cop->key_prefix,
					     keysize);
		if (res2) {
			if (res2 == -ENOKEY)
				res = -ENOKEY;
			goto out;
		}
	} else if (res) {
		goto out;
	}

	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n",
			 __func__, res, inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	/*
	 * if the provided key is longer than keysize, we use the first
	 * keysize bytes of the derived key only
	 */
	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
	if (res)
		goto out;

	if (S_ISREG(inode->i_mode) &&
	    crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
		res = init_essiv_generator(crypt_info, raw_key, keysize);
		if (res) {
			pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
				 __func__, res, inode->i_ino);
			goto out;
		}
	}
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
		crypt_info = NULL;
out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	kzfree(raw_key);
	return res;
}