static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                             unsigned int keylen)
{
        struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_ahash *ghash = ctx->ghash;
        struct crypto_ablkcipher *ctr = ctx->ctr;
        struct {
                be128 hash;
                u8 iv[8];

                struct crypto_gcm_setkey_result result;

                struct scatterlist sg[1];
                struct ablkcipher_request req;
        } *data;
        int err;

        crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
                                    CRYPTO_TFM_REQ_MASK);

        err = crypto_ablkcipher_setkey(ctr, key, keylen);
        if (err)
                return err;

        crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
                              CRYPTO_TFM_RES_MASK);

        data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
                       GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        init_completion(&data->result.completion);
        sg_init_one(data->sg, &data->hash, sizeof(data->hash));
        ablkcipher_request_set_tfm(&data->req, ctr);
        ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        crypto_gcm_setkey_done,
                                        &data->result);
        ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
                                     sizeof(data->hash), data->iv);

        err = crypto_ablkcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                err = wait_for_completion_interruptible(
                        &data->result.completion);
                if (!err)
                        err = data->result.err;
        }

        if (err)
                goto out;

        crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
                               CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
        crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
                              CRYPTO_TFM_RES_MASK);

out:
        kfree(data);
        return err;
}
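The snippet above hand-rolls the wait-for-completion pattern around an asynchronous cipher request (a private result struct, a completion, and a callback that fills it in); the same pattern recurs in most of the snippets below. Newer kernels factor this into a small helper. A minimal sketch of an equivalent synchronous encryption using the skcipher API, assuming a kernel recent enough to provide DECLARE_CRYPTO_WAIT()/crypto_wait_req(); the function name is illustrative only:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Sketch only: drive one synchronous encryption over an async skcipher. */
static int example_encrypt_sync(struct crypto_skcipher *tfm,
                                struct scatterlist *sg, unsigned int len,
                                u8 *iv)
{
        DECLARE_CRYPTO_WAIT(wait);      /* completion plus error slot */
        struct skcipher_request *req;
        int err;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* crypto_req_done() completes 'wait' and records the final error. */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                      CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        /* Waits (uninterruptibly) if the driver returns -EINPROGRESS/-EBUSY. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
        return err;
}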
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
        struct ablkcipher_request *subreq;
        crypto_completion_t complete;
        void *data;
        struct scatterlist *osrc, *odst;
        struct scatterlist *dst;
        struct page *srcp;
        struct page *dstp;
        u8 *giv;
        u8 *vsrc;
        u8 *vdst;
        __be64 seq;
        unsigned int ivsize;
        unsigned int len;
        int err;

        subreq = (void *)(reqctx->tail + ctx->reqoff);

        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

        giv = req->giv;
        complete = req->creq.base.complete;
        data = req->creq.base.data;

        osrc = req->creq.src;
        odst = req->creq.dst;
        srcp = sg_page(osrc);
        dstp = sg_page(odst);
        vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
        vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;

        ivsize = crypto_ablkcipher_ivsize(geniv);

        if (vsrc != giv + ivsize && vdst != giv + ivsize) {
                giv = PTR_ALIGN((u8 *)reqctx->tail,
                                crypto_ablkcipher_alignmask(geniv) + 1);
                complete = eseqiv_complete;
                data = req;
        }

        ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
                                        data);

        sg_init_table(reqctx->src, 2);
        sg_set_buf(reqctx->src, giv, ivsize);
        scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);

        dst = reqctx->src;
        if (osrc != odst) {
                sg_init_table(reqctx->dst, 2);
                sg_set_buf(reqctx->dst, giv, ivsize);
                scatterwalk_crypto_chain(reqctx->dst, odst,
                                         vdst == giv + ivsize, 2);

                dst = reqctx->dst;
        }

        ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
                                     req->creq.nbytes + ivsize,
                                     req->creq.info);

        memcpy(req->creq.info, ctx->salt, ivsize);

        len = ivsize;
        if (ivsize > sizeof(u64)) {
                memset(req->giv, 0, ivsize - sizeof(u64));
                len = sizeof(u64);
        }
        seq = cpu_to_be64(req->seq);
        memcpy(req->giv + ivsize - len, &seq, len);

        err = crypto_ablkcipher_encrypt(subreq);
        if (err)
                goto out;

        if (giv != req->giv)
                eseqiv_complete2(req);

out:
        return err;
}
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
        return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero.*/
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /*Account for 4 byte nonce at the end.*/
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /*key is not aligned: use an auxiliary aligned pointer*/
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
        kfree(new_key_mem);
        return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
        struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
        int ret;

        ret = crypto_aead_setkey(child, key, key_len);
        if (!ret) {
                memcpy(ctx, c_ctx, sizeof(*ctx));
                ctx->cryptd_tfm = cryptd_tfm;
        }
        return ret;
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(aead)->authsize = authsize;
        return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
        int ret;

        ret = crypto_aead_setauthsize(child, authsize);
        if (!ret)
                crypto_aead_crt(parent)->authsize = authsize;
        return ret;
}

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        u32 key_len = ctx->aes_key_expanded.key_length;
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length equal */
        /* to 8 or 12 bytes */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 &&
                     auth_tag_len != 16))
                return -EINVAL;
        if (unlikely(key_len != AES_KEYSIZE_128 &&
                     key_len != AES_KEYSIZE_192 &&
                     key_len != AES_KEYSIZE_256))
                return -EINVAL;

        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                              GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                         req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}
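The 16-byte iv assembled in __driver_rfc4106_encrypt above follows the RFC 4106 construction of the GCM pre-counter block for a 96-bit nonce: 4 bytes of salt taken from the key material, the 8-byte explicit IV carried in the packet, and a 32-bit big-endian block counter initialised to 1. The byte values below are made-up examples, shown only to illustrate the layout:

#include <stdint.h>

/* Illustrative layout of the counter block built above, for a hypothetical
 * salt of 00 11 22 33 and an explicit IV of 01..08. */
static const uint8_t example_j0[16] = {
        0x00, 0x11, 0x22, 0x33,                          /* salt (ctx->nonce)        */
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, /* explicit IV (req->iv)    */
        0x00, 0x00, 0x00, 0x01,                          /* 32-bit counter, starts 1 */
};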
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
        return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero.*/
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *child_ctx =
                aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /*Account for 4 byte nonce at the end.*/
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /*key is not aligned: use an auxiliary aligned pointer*/
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
        memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child =
                        cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}
/* output_size must be <= 512 bits (<= 64 bytes) */
enum ctr_drbg_status_t block_cipher_df(struct ctr_drbg_ctx_s *ctx,
                                       const uint8_t *input,
                                       uint32_t input_size,
                                       uint8_t *output,
                                       uint32_t output_size)
{
        enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
        uint32_t s_len = 0;
        uint32_t s_pad_len = 0;
        uint8_t temp[32];
        uint32_t out_len = 0;
        uint8_t siv_string[64];
        uint8_t *p_s_string = NULL;
        int rc;
        struct scatterlist sg_in, sg_out;

        if (output_size > 64)
                return CTR_DRBG_INVALID_ARG;

        s_len = input_size + 9;

        s_pad_len = s_len % 16;

        if (0 != s_pad_len)
                s_len += (16 - s_pad_len);

        /* add the length of IV */
        s_len += 16;

        if (s_len > 64)
                pr_debug("error! s_len is too big!!!!!!!!!!!!\n");

        memset(siv_string, 0, 64);

        p_s_string = siv_string + 16;

        p_s_string[3] = input_size;
        p_s_string[7] = output_size;
        memcpy(p_s_string + 8, input, input_size);
        p_s_string[8 + input_size] = 0x80;

        if (0 < s_pad_len)
                memset(p_s_string + 9 + input_size, '\0', s_pad_len);

        ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp);
        if (CTR_DRBG_SUCCESS != ret_val) {
                pr_debug("df_bcc_func failed, returned %d", ret_val);
                goto out;
        }

        siv_string[3] = 0x1;
        ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp + 16);
        if (CTR_DRBG_SUCCESS != ret_val)
                goto out;

        out_len = 0;
        rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
                                      temp, AES128_KEY_SIZE);
        if (rc) {
                pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
                goto out;
        }
        memcpy(ctx->df_aes_ctx.input.virt_addr, temp + 16, 16);

        while (out_len < output_size) {
                init_completion(&ctx->df_aes_ctx.result.completion);

                /*
                 * Note: personalize these called routines for
                 * specific testing.
                 */
                crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);

                /* Encrypt some clear text! */
                sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
                sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
                ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
                                             &sg_in, &sg_out,
                                             CTR_DRBG_BLOCK_LEN_BYTES,
                                             NULL);

                rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
                switch (rc) {
                case 0:
                        break;
                case -EINPROGRESS:
                case -EBUSY:
                        rc = wait_for_completion_interruptible(
                                &ctx->df_aes_ctx.result.completion);
                        if (!rc && !ctx->df_aes_ctx.result.err) {
                                INIT_COMPLETION(
                                        ctx->df_aes_ctx.result.completion);
                                break;
                        }
                        /* fall through */
                default:
                        break;
                }

                init_completion(&ctx->df_aes_ctx.result.completion);

                memcpy(output + out_len, ctx->df_aes_ctx.output.virt_addr, 16);
                memcpy(ctx->df_aes_ctx.input.virt_addr, output + out_len, 16);
                out_len += 16;
        }

out:
        memset(siv_string, 0, 64);
        memset(temp, 0, 32);

        return ret_val;
}
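The siv_string built above follows the SP 800-90A Block_Cipher_df input layout: a 16-byte IV block holding a 32-bit big-endian block counter (0 for the first df_bcc_func call, 1 for the second), followed by S = L || N || input || 0x80 || zero padding, where L and N are 32-bit big-endian byte counts of the input and the requested output. The single-byte stores above suffice because both lengths fit in one byte. A sketch of that layout for a hypothetical 16-byte input and 32-byte output:

#include <stdint.h>

/* Illustrative contents of siv_string for input_size = 16, output_size = 32. */
static const uint8_t example_siv[64] = {
        /* IV block: 32-bit big-endian block counter, zero-padded to 16 bytes */
        0x00, 0x00, 0x00, 0x00,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
        /* L: input length in bytes, big-endian 32-bit */
        0x00, 0x00, 0x00, 0x10,
        /* N: requested output length in bytes, big-endian 32-bit */
        0x00, 0x00, 0x00, 0x20,
        /* ... 16 input bytes go here, then the 0x80 terminator, then zeros ... */
};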
/*
 * BCC (block-chaining) helper for the derivation function, used to
 * condition biased entropy input.
 */
enum ctr_drbg_status_t df_bcc_func(struct ctr_drbg_ctx_s *ctx,
                                   uint8_t *key,
                                   uint8_t *input,
                                   uint32_t input_size,
                                   uint8_t *output)
{
        enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS;
        uint8_t *p;
        int rc;
        int i;
        int n;
        struct scatterlist sg_in, sg_out;

        if (0 != (input_size % CTR_DRBG_BLOCK_LEN_BYTES))
                return CTR_DRBG_INVALID_ARG;

        n = input_size / CTR_DRBG_BLOCK_LEN_BYTES;

        for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
                ctx->df_aes_ctx.output.virt_addr[i] = 0;

        rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm,
                                      key, AES128_KEY_SIZE);
        if (rc) {
                pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc);
                return CTR_DRBG_GENERAL_ERROR;
        }

        p = input;
        while (n > 0) {
                for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++, p++)
                        ctx->df_aes_ctx.input.virt_addr[i] =
                                ctx->df_aes_ctx.output.virt_addr[i] ^ (*p);

                init_completion(&ctx->df_aes_ctx.result.completion);

                /*
                 * Note: personalize these called routines for
                 * specific testing.
                 */
                crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0);

                /* Encrypt some clear text! */
                sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16);
                sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16);
                ablkcipher_request_set_crypt(ctx->df_aes_ctx.req,
                                             &sg_in, &sg_out,
                                             CTR_DRBG_BLOCK_LEN_BYTES,
                                             NULL);

                rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req);
                switch (rc) {
                case 0:
                        break;
                case -EINPROGRESS:
                case -EBUSY:
                        rc = wait_for_completion_interruptible(
                                &ctx->df_aes_ctx.result.completion);
                        if (!rc && !ctx->df_aes_ctx.result.err) {
                                INIT_COMPLETION(
                                        ctx->df_aes_ctx.result.completion);
                                break;
                        }
                        /* fall through */
                default:
                        break;
                }

                init_completion(&ctx->df_aes_ctx.result.completion);

                n--;
        }

        for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++)
                output[i] = ctx->df_aes_ctx.output.virt_addr[i];

        return ret_val;
}
/*
 * Generate random bits.  len_bits is specified in bits, as required by
 * NIST SP 800-90.  It fails with CTR_DRBG_NEEDS_RESEED if the number
 * of generate requests since instantiation or the last reseed >= the
 * reseed_interval supplied at instantiation.  len_bits must be a
 * multiple of 8.  len_bits must not exceed 2^19, as per NIST SP
 * 800-90.  Optionally stirs in additional_input which is
 * additional_input_len_bits long, and is silently rounded up to a
 * multiple of 8.  CTR_DRBG_INVALID_ARG is returned if any pointer arg
 * is null and the corresponding length is non-zero or if
 * additional_input_len_bits > 256.
 */
enum ctr_drbg_status_t
ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx,
                         void *additional_input,
                         size_t additional_input_len_bits,
                         void *buffer,
                         size_t len_bits)
{
        size_t total_blocks = (len_bits + 127) / 128;
        enum ctr_drbg_status_t update_rv;
        int rv = 0;
        size_t i;
        int rc;
        struct scatterlist sg_in, sg_out;

        if (ctx == NULL)
                return CTR_DRBG_INVALID_ARG;
        if (buffer == NULL && len_bits > 0)
                return CTR_DRBG_INVALID_ARG;
        if (len_bits % 8 != 0)
                return CTR_DRBG_INVALID_ARG;
        if (len_bits > (1<<19))
                return CTR_DRBG_INVALID_ARG;
        if ((additional_input == NULL && additional_input_len_bits > 0) ||
            additional_input_len_bits > CTR_DRBG_SEED_LEN_BITS)
                return CTR_DRBG_INVALID_ARG;
        if (ctx->reseed_counter > ctx->reseed_interval)
                return CTR_DRBG_NEEDS_RESEED;

        rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm,
                                      ctx->seed.key_V.key,
                                      AES128_KEY_SIZE);
        if (rc) {
                pr_debug("crypto_ablkcipher_setkey API failed: %d", rc);
                return CTR_DRBG_GENERAL_ERROR;
        }
        if (rv < 0)
                return CTR_DRBG_GENERAL_ERROR;

        if (!ctx->continuous_test_started) {
                increment_V(ctx);
                init_completion(&ctx->aes_ctx.result.completion);
                crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);
                memcpy(ctx->aes_ctx.input.virt_addr, ctx->seed.key_V.V, 16);
                sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16);
                sg_init_one(&sg_out, ctx->aes_ctx.output.virt_addr, 16);
                ablkcipher_request_set_crypt(ctx->aes_ctx.req,
                                             &sg_in, &sg_out,
                                             CTR_DRBG_BLOCK_LEN_BYTES, NULL);
                rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
                switch (rc) {
                case 0:
                        break;
                case -EINPROGRESS:
                case -EBUSY:
                        rc = wait_for_completion_interruptible(
                                &ctx->aes_ctx.result.completion);
                        if (!rc && !ctx->aes_ctx.result.err) {
                                INIT_COMPLETION(ctx->aes_ctx.result.completion);
                                break;
                        }
                        /* fall through */
                default:
                        pr_debug(":crypto_ablkcipher_encrypt returned with %d result %d on iteration\n",
                                 rc, ctx->aes_ctx.result.err);
                        break;
                }
                init_completion(&ctx->aes_ctx.result.completion);

                memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, 16);
                ctx->continuous_test_started = 1;
        }

        /* Generate the output */
        for (i = 0; i < total_blocks; ++i) {
                /* Increment the counter */
                increment_V(ctx);
                if (((len_bits % 128) != 0) && (i == (total_blocks - 1))) {
                        /* last block and it's a fragment */
                        init_completion(&ctx->aes_ctx.result.completion);

                        /*
                         * Note: personalize these called routines for
                         * specific testing.
                         */
                        crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);

                        /* Encrypt some clear text! */
                        memcpy(ctx->aes_ctx.input.virt_addr,
                               ctx->seed.key_V.V, 16);
                        sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16);
                        sg_init_one(&sg_out,
                                    ctx->aes_ctx.output.virt_addr, 16);
                        ablkcipher_request_set_crypt(ctx->aes_ctx.req,
                                                     &sg_in, &sg_out,
                                                     CTR_DRBG_BLOCK_LEN_BYTES,
                                                     NULL);

                        rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
                        switch (rc) {
                        case 0:
                                break;
                        case -EINPROGRESS:
                        case -EBUSY:
                                rc = wait_for_completion_interruptible(
                                        &ctx->aes_ctx.result.completion);
                                if (!rc && !ctx->aes_ctx.result.err) {
                                        INIT_COMPLETION(
                                                ctx->aes_ctx.result.completion);
                                        break;
                                }
                                /* fall through */
                        default:
                                break;
                        }

                        init_completion(&ctx->aes_ctx.result.completion);

                        if (!memcmp(ctx->prev_drn,
                                    ctx->aes_ctx.output.virt_addr, 16))
                                return CTR_DRBG_GENERAL_ERROR;
                        else
                                memcpy(ctx->prev_drn,
                                       ctx->aes_ctx.output.virt_addr, 16);
                        rv = 0;
                        memcpy((uint8_t *)buffer + 16*i,
                               ctx->aes_ctx.output.virt_addr,
                               (len_bits % 128)/8);
                } else {
                        /* normal case: encrypt direct to target buffer */
                        init_completion(&ctx->aes_ctx.result.completion);

                        /*
                         * Note: personalize these called routines for
                         * specific testing.
                         */
                        crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);

                        /* Encrypt some clear text! */
                        memcpy(ctx->aes_ctx.input.virt_addr,
                               ctx->seed.key_V.V, 16);
                        sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16);
                        sg_init_one(&sg_out,
                                    ctx->aes_ctx.output.virt_addr, 16);
                        ablkcipher_request_set_crypt(ctx->aes_ctx.req,
                                                     &sg_in, &sg_out,
                                                     CTR_DRBG_BLOCK_LEN_BYTES,
                                                     NULL);

                        rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
                        switch (rc) {
                        case 0:
                                break;
                        case -EINPROGRESS:
                        case -EBUSY:
                                rc = wait_for_completion_interruptible(
                                        &ctx->aes_ctx.result.completion);
                                if (!rc && !ctx->aes_ctx.result.err) {
                                        INIT_COMPLETION(
                                                ctx->aes_ctx.result.completion);
                                        break;
                                }
                                /* fall through */
                        default:
                                break;
                        }

                        if (!memcmp(ctx->prev_drn,
                                    ctx->aes_ctx.output.virt_addr, 16))
                                return CTR_DRBG_GENERAL_ERROR;
                        else
                                memcpy(ctx->prev_drn,
                                       ctx->aes_ctx.output.virt_addr, 16);

                        memcpy((uint8_t *)buffer + 16*i,
                               ctx->aes_ctx.output.virt_addr, 16);
                        rv = 0;
                }
        }

        update_rv = update(ctx, additional_input,
                           (additional_input_len_bits + 7) / 8); /* round up */
        if (update_rv != CTR_DRBG_SUCCESS)
                return update_rv;

        ctx->reseed_counter += 1;

        return CTR_DRBG_SUCCESS;
}
/*
 * The NIST update function.  It updates the key and V to new values
 * (to prevent backtracking) and optionally stirs in data.  data may
 * be null, otherwise *data is from 0 to 256 bits long.
 * keysched is an optional keyschedule to use as an optimization.  It
 * must be consistent with the key in *ctx.  No changes are made to
 * *ctx until it is assured that there will be no failures.  Note that
 * data_len is in bytes.  (That may not be the official NIST
 * recommendation, but I do it anyway; they say "or equivalent" and
 * this is equivalent enough.)
 */
static enum ctr_drbg_status_t update(struct ctr_drbg_ctx_s *ctx,
                                     const uint8_t *data, size_t data_len)
{
        uint8_t temp[32];
        unsigned int i;
        int rc;
        struct scatterlist sg_in, sg_out;

        for (i = 0; i < 2; ++i) {
                increment_V(ctx);
                init_completion(&ctx->aes_ctx.result.completion);

                /*
                 * Note: personalize these called routines for
                 * specific testing.
                 */
                memcpy(ctx->aes_ctx.input.virt_addr,
                       ctx->seed.key_V.V,
                       CTR_DRBG_BLOCK_LEN_BYTES);
                crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0);

                /* Encrypt some clear text! */
                sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr,
                            AES128_BLOCK_SIZE);
                sg_init_one(&sg_out, ctx->aes_ctx.output.virt_addr,
                            AES128_BLOCK_SIZE);
                ablkcipher_request_set_crypt(ctx->aes_ctx.req,
                                             &sg_in, &sg_out,
                                             CTR_DRBG_BLOCK_LEN_BYTES,
                                             NULL);

                rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req);
                switch (rc) {
                case 0:
                        break;
                case -EINPROGRESS:
                case -EBUSY:
                        rc = wait_for_completion_interruptible(
                                &ctx->aes_ctx.result.completion);
                        if (!rc && !ctx->aes_ctx.result.err) {
                                INIT_COMPLETION(ctx->aes_ctx.result.completion);
                                break;
                        }
                        /* fall through */
                default:
                        pr_debug("crypto_ablkcipher_encrypt returned");
                        pr_debug(" with %d result %d on iteration\n",
                                 rc, ctx->aes_ctx.result.err);
                        break;
                }

                init_completion(&ctx->aes_ctx.result.completion);

                memcpy(temp + AES128_BLOCK_SIZE * i,
                       ctx->aes_ctx.output.virt_addr,
                       AES128_BLOCK_SIZE);
        }

        if (data_len > 0)
                pr_debug("in update, data_len = %zu\n", data_len);

        for (i = 0; i < data_len; ++i)
                ctx->seed.as_bytes[i] = temp[i] ^ data[i];

        /* now copy the rest of temp to key and V */
        if (32 > data_len) {
                memcpy(ctx->seed.as_bytes + data_len,
                       temp + data_len,
                       32 - data_len);
        }

        memset(temp, 0, 32);

        return CTR_DRBG_SUCCESS;
}
/**
 * fname_encrypt() -
 *
 * This function encrypts the input filename, and returns the length of the
 * ciphertext. Errors are returned as negative numbers. We trust the caller to
 * allocate sufficient memory for the oname string.
 */
static int fname_encrypt(struct inode *inode,
                         const struct qstr *iname, struct fscrypt_str *oname)
{
        u32 ciphertext_len;
        struct ablkcipher_request *req = NULL;
        DECLARE_FS_COMPLETION_RESULT(ecr);
        struct fscrypt_info *ci = inode->i_crypt_info;
        struct crypto_ablkcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[FS_CRYPTO_BLOCK_SIZE];
        struct scatterlist src_sg, dst_sg;
        int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
        char *workbuf, buf[32], *alloc_buf = NULL;
        unsigned lim;

        lim = inode->i_sb->s_cop->max_namelen(inode);
        if (iname->len <= 0 || iname->len > lim)
                return -EIO;

        ciphertext_len = (iname->len < FS_CRYPTO_BLOCK_SIZE) ?
                                FS_CRYPTO_BLOCK_SIZE : iname->len;
        ciphertext_len = size_round_up(ciphertext_len, padding);
        ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;

        if (ciphertext_len <= sizeof(buf)) {
                workbuf = buf;
        } else {
                alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
                if (!alloc_buf)
                        return -ENOMEM;
                workbuf = alloc_buf;
        }

        /* Allocate request */
        req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_request_alloc() failed\n", __func__);
                kfree(alloc_buf);
                return -ENOMEM;
        }
        ablkcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        dir_crypt_complete, &ecr);

        /* Copy the input */
        memcpy(workbuf, iname->name, iname->len);
        if (iname->len < ciphertext_len)
                memset(workbuf + iname->len, 0, ciphertext_len - iname->len);

        /* Initialize IV */
        memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

        /* Create encryption request */
        sg_init_one(&src_sg, workbuf, ciphertext_len);
        sg_init_one(&dst_sg, oname->name, ciphertext_len);
        ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
                                     ciphertext_len, iv);
        res = crypto_ablkcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        kfree(alloc_buf);
        ablkcipher_request_free(req);
        if (res < 0)
                printk_ratelimited(KERN_ERR
                        "%s: Error (error code %d)\n", __func__, res);

        oname->len = ciphertext_len;
        return res;
}
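For reference, ciphertext_len above is the filename length rounded up to at least one cipher block and then to the policy's padding size (the padding value derived from the policy flags is 4, 8, 16 or 32 bytes), capped at the filesystem's maximum name length. A minimal standalone sketch of that computation; the helper name and constant below are reproduced only for illustration and are not part of the kernel API:

#include <stdint.h>

#define EXAMPLE_FS_CRYPTO_BLOCK_SIZE 16 /* mirrors FS_CRYPTO_BLOCK_SIZE above */

/* Round len up to the policy padding and clamp to the fs name-length limit. */
static uint32_t example_ciphertext_len(uint32_t name_len, uint32_t padding,
                                       uint32_t lim)
{
        uint32_t len = name_len < EXAMPLE_FS_CRYPTO_BLOCK_SIZE ?
                        EXAMPLE_FS_CRYPTO_BLOCK_SIZE : name_len;

        len = (len + padding - 1) / padding * padding;  /* size_round_up() */
        return len > lim ? lim : len;
}

/* e.g. a 13-byte name with 32-byte padding and lim = 255 yields 32. */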