/*
 * Build the src/dst scatterlists for an RFC4106 GCM request.
 *
 * The copy below takes only the first (assoclen - GCM_RFC4106_IV_SIZE)
 * bytes of the AAD region and the fast-forward skips the full
 * areq->assoclen, i.e. the trailing GCM_RFC4106_IV_SIZE bytes of the AAD
 * region are dropped (the explicit IV carried inside the AAD).
 *
 * Returns 0 on success or -EINVAL for an unsupported assoclen.
 */
static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	struct scatterlist *sg;

	/* Only 16 or 20 bytes of AAD region (8/12 bytes of real AAD
	 * plus the trailing IV) are accepted. */
	if (areq->assoclen != 16 && areq->assoclen != 20)
		return -EINVAL;

	/* Linearize the AAD (minus the trailing IV) into rctx->assoc. */
	scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);

	/*
	 * src = [linear AAD copy][payload after full AAD region]; the third
	 * slot is reserved for chaining when scatterwalk_ffwd() could not
	 * reuse slot 1 and returned a pointer into the original list.
	 */
	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, rctx->assoc, assoclen);
	sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	/* Out-of-place operation: mirror the same layout for dst. */
	if (areq->src != areq->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, rctx->assoc, assoclen);
		sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_rctx->src = rctx->src;
	aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;

	return 0;
}
/*
 * Prepare the CCM counter block and build the src/dst scatterlists with
 * the 16-byte auth-tag buffer prepended ahead of the payload.
 *
 * req->iv holds the counter-block template; iv[0] encodes the size of the
 * length field (L' = L - 1, validated by crypto_ccm_check_iv()).
 *
 * Returns 0 on success or the error from crypto_ccm_check_iv().
 */
static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
{
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct scatterlist *sg;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	/* Zero the trailing iv[0] + 1 counter bytes of the block. */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/*
	 * src = [tag buffer][payload after the AAD]; slot 2 is reserved for
	 * chaining when scatterwalk_ffwd() cannot reuse slot 1.
	 */
	sg_init_table(pctx->src, 3);
	sg_set_buf(pctx->src, tag, 16);
	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
	if (sg != pctx->src + 1)
		sg_chain(pctx->src, 2, sg);

	/* Out-of-place operation: mirror the same layout for dst. */
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 3);
		sg_set_buf(pctx->dst, tag, 16);
		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
		if (sg != pctx->dst + 1)
			sg_chain(pctx->dst, 2, sg);
	}

	return 0;
}
/*
 * Translate an RFC4309 request into a plain CCM request on the child tfm.
 *
 * The IV scratch area lives in the request context right after the
 * subrequest, aligned for the child cipher.  Layout written below:
 *   iv[0]      = 3            (L' field of the CCM counter block)
 *   iv[1..3]   = 3-byte nonce from setkey
 *   iv[4..11]  = 8-byte per-request IV from req->iv
 *   iv + 16    = linear copy of the AAD minus its trailing 8 IV bytes
 *
 * Returns the prepared subrequest (no failure paths here).
 */
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	struct scatterlist *sg;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;
	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	/* Copy the real AAD (without the trailing 8-byte IV) to iv + 16. */
	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);

	/*
	 * src = [linear AAD copy][payload after full AAD region]; slot 2 is
	 * reserved for chaining when scatterwalk_ffwd() cannot reuse slot 1.
	 */
	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	/* Out-of-place operation: mirror the same layout for dst. */
	if (req->src != req->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, rctx->src,
			       req->src == req->dst ? rctx->src : rctx->dst,
			       req->cryptlen, iv);
	aead_request_set_ad(subreq, req->assoclen - 8);

	return subreq;
}
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out) { struct scatter_walk walk; struct scatterlist tmp[2]; if (!nbytes) return; sg = scatterwalk_ffwd(tmp, sg, start); scatterwalk_start(&walk, sg); scatterwalk_copychunks(buf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); }
/*
 * Copy @nbytes between a linear buffer and a scatterlist, starting at byte
 * offset @start within the list.  @out non-zero copies buf -> sg; zero
 * copies sg -> buf.
 */
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
			      unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;
	struct scatterlist tmp[2];

	if (!nbytes)
		return;

	sg = scatterwalk_ffwd(tmp, sg, start);

	/*
	 * Skip the copy when @buf aliases the start of the forwarded sg
	 * entry (same page, same in-page offset).
	 * NOTE(review): this compares only the first page of the region; it
	 * assumes an alias at the start implies the whole nbytes alias —
	 * confirm this holds for multi-page copies.
	 */
	if (sg_page(sg) == virt_to_page(buf) &&
	    sg->offset == offset_in_page(buf))
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
/*
 * rsa_edesc_alloc - allocate an extended descriptor for a CAAM RSA request
 * @req:     akcipher request this descriptor serves
 * @desclen: size in bytes of the hardware job descriptor to reserve
 *
 * Strips leading zeros from the RSA input, DMA-maps src/dst and, when
 * either side needs more than one S/G entry, builds and maps a sec4 S/G
 * table placed right after the descriptor in the same allocation.
 *
 * Returns the descriptor on success or an ERR_PTR on failure.
 *
 * Fix: sg_nents_for_len() returns a negative errno when the scatterlist
 * is shorter than the requested length; the original code fed that value
 * straight into dma_map_sg() and the sec4 table size computation.  Both
 * return values are now checked and propagated.
 */
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	/* The accelerator expects the input without leading zero bytes. */
	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	/* Propagate errors instead of using a negative nents count. */
	src_nents = sg_nents_for_len(req->src, req->src_len);
	if (src_nents < 0)
		return ERR_PTR(src_nents);

	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
	if (dst_nents < 0)
		return ERR_PTR(dst_nents);

	/* A sec4 S/G table is needed only for multi-entry sides. */
	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	/* The sec4 S/G table lives right after the hw descriptor area. */
	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
/*
 * CCM encryption: MAC the AAD and plaintext while CTR-encrypting the
 * payload, then append the (possibly truncated) auth tag to the dst.
 * Runs the per-block work inside a NEON region.
 */
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct blkcipher_desc desc = { .info = req->iv };
	struct blkcipher_walk walk;
	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	kernel_neon_begin_partial(6);

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	/* Skip the AAD region; payload starts at req->assoclen. */
	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
	dst = src;
	if (req->src != req->dst)
		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);

	blkcipher_walk_init(&walk, dst, src, len);
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
					     AES_BLOCK_SIZE);

	while (walk.nbytes) {
		/* Only the last chunk may carry a partial-block tail. */
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		if (walk.nbytes == len)
			tail = 0;

		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   walk.nbytes - tail, ctx->key_enc,
				   num_rounds(ctx), mac, walk.iv);

		len -= walk.nbytes - tail;
		err = blkcipher_walk_done(&desc, &walk, tail);
	}
	if (!err)
		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

	kernel_neon_end();

	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

/*
 * CCM decryption: CTR-decrypt the payload while recomputing the MAC, then
 * compare the result against the tag stored at the end of src.
 * Returns -EBADMSG on authentication failure.
 */
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct blkcipher_desc desc = { .info = req->iv };
	struct blkcipher_walk walk;
	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	/* cryptlen covers ciphertext + tag; only decrypt the ciphertext. */
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	kernel_neon_begin_partial(6);

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	/* Skip the AAD region; payload starts at req->assoclen. */
	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
	dst = src;
	if (req->src != req->dst)
		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);

	blkcipher_walk_init(&walk, dst, src, len);
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
					     AES_BLOCK_SIZE);

	while (walk.nbytes) {
		/* Only the last chunk may carry a partial-block tail. */
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		if (walk.nbytes == len)
			tail = 0;

		ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   walk.nbytes - tail, ctx->key_enc,
				   num_rounds(ctx), mac, walk.iv);

		len -= walk.nbytes - tail;
		err = blkcipher_walk_done(&desc, &walk, tail);
	}
	if (!err)
		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

	kernel_neon_end();

	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
				 authsize, 0);
	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;

	return 0;
}

/* AEAD algorithm registration for ARMv8 Crypto Extensions CCM(AES). */
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_flags		= CRYPTO_ALG_AEAD_NEW,
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_alignmask		= 7,
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

/* Register the AEAD only when the CPU advertises the AES extensions. */
static int __init aes_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, struct aead_request *req) { int alen, clen, cryptlen, assoclen, ret; struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int authlen = crypto_aead_authsize(aead); struct scatterlist *tmp, sg_arr[2]; int nsg; u16 flags; assoclen = req->assoclen; cryptlen = req->cryptlen; if (dd->flags & FLAGS_RFC4106_GCM) assoclen -= 8; if (!(dd->flags & FLAGS_ENCRYPT)) cryptlen -= authlen; alen = ALIGN(assoclen, AES_BLOCK_SIZE); clen = ALIGN(cryptlen, AES_BLOCK_SIZE); nsg = !!(assoclen && cryptlen); omap_aes_clear_copy_flags(dd); sg_init_table(dd->in_sgl, nsg + 1); if (assoclen) { tmp = req->src; ret = omap_crypto_align_sg(&tmp, assoclen, AES_BLOCK_SIZE, dd->in_sgl, OMAP_CRYPTO_COPY_DATA | OMAP_CRYPTO_ZERO_BUF | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, FLAGS_ASSOC_DATA_ST_SHIFT, &dd->flags); } if (cryptlen) { tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen); ret = omap_crypto_align_sg(&tmp, cryptlen, AES_BLOCK_SIZE, &dd->in_sgl[nsg], OMAP_CRYPTO_COPY_DATA | OMAP_CRYPTO_ZERO_BUF | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, FLAGS_IN_DATA_ST_SHIFT, &dd->flags); } dd->in_sg = dd->in_sgl; dd->total = cryptlen; dd->assoc_len = assoclen; dd->authsize = authlen; dd->out_sg = req->dst; dd->orig_out = req->dst; dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen); flags = 0; if (req->src == req->dst || dd->out_sg == sg_arr) flags |= OMAP_CRYPTO_FORCE_COPY; ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, AES_BLOCK_SIZE, &dd->out_sgl, flags, FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); if (ret) return ret; dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen); dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen); return 0; }