static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
	struct se_crypto_request *creq = &rctx->nkreq.creq;
	struct flexi_crypto_context *fctx = nctx->u.fctx;
	int ret;

	memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);

	rctx->cryptlen = areq->cryptlen - aead->authsize;
	rctx->assoclen = areq->assoclen;
	rctx->srclen = areq->cryptlen + areq->assoclen;
	rctx->dstlen = rctx->srclen - aead->authsize;
	rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
	rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
	rctx->flags = areq->base.flags;
	rctx->ctx_handle = nctx->u.ctx_handle;
	rctx->src = areq->src;
	rctx->dst = areq->dst;
	rctx->ctrl_arg = DECRYPT;
	ret = nitrox_set_creq(rctx);
	if (ret)
		return ret;

	/* send the crypto request */
	return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
					 areq);
}
int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);

	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	struct scatterlist *sg;

	if (areq->assoclen != 16 && areq->assoclen != 20)
		return -EINVAL;

	scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, rctx->assoc, assoclen);
	sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (areq->src != areq->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, rctx->assoc, assoclen);
		sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_rctx->src = rctx->src;
	aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;

	return 0;
}
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;

	if (req->src == req->dst) {
		/* INPLACE */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else {
		/* NON-INPLACE and ENCRYPT */
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
	}
}
static inline int dx_aead_handle_frag_icv(struct device *dev,
					  struct aead_request *req, int lbytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	if ((areq_ctx->gen_ctx.op_type == SEP_CRYPTO_DIRECTION_DECRYPT) ||
	    (req->src == req->dst)) {
		areq_ctx->frag_icv_sg = req->src;
		areq_ctx->frag_icv_sg_nents = areq_ctx->in_nents;
		/*
		 * ICV-only fragment count: the ICV may span at most two
		 * fragments; anything more is rejected with -EINVAL.
		 */
		if (sg_dma_len(&req->src[areq_ctx->in_nents - 2]) <
		    (areq_ctx->req_authsize - lbytes)) {
			goto err;
		} else if (sg_dma_len(&req->src[areq_ctx->in_nents - 2]) ==
			   (areq_ctx->req_authsize - lbytes)) {
			areq_ctx->icv_only_frag = 2;
		} else {
			areq_ctx->icv_only_frag = 1;
		}
	} else {
		areq_ctx->frag_icv_sg = req->dst;
		areq_ctx->frag_icv_sg_nents = areq_ctx->out_nents;
	}
	areq_ctx->last_bytes = lbytes;
	areq_ctx->is_icv_frag = true;

	return 0;
err:
	return -EINVAL;
}
static int nitrox_rfc4106_dec(struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
	int ret;

	aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
	aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	aead_rctx->srclen = areq->cryptlen - GCM_RFC4106_IV_SIZE +
		areq->assoclen;
	aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
	aead_rctx->iv = areq->iv;
	aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
	aead_rctx->flags = areq->base.flags;
	aead_rctx->ctx_handle = nctx->u.ctx_handle;
	aead_rctx->ctrl_arg = DECRYPT;

	ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
	if (ret)
		return ret;

	ret = nitrox_set_creq(aead_rctx);
	if (ret)
		return ret;

	/* send the crypto request */
	return nitrox_process_se_request(nctx->ndev, creq,
					 nitrox_rfc4106_callback, areq);
}
static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
	int err;
	struct aead_request *areq = &req->areq;
	struct pcrypt_request *preq = aead_request_ctx(areq);
	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(areq);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_givenc;
	padata->serial = pcrypt_aead_giv_serial;

	aead_givcrypt_set_tfm(creq, ctx->child);
	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				   pcrypt_aead_done, areq);
	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
				areq->cryptlen, areq->iv);
	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
	aead_givcrypt_set_giv(creq, req->giv, req->seq);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}
static int seqiv_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = 8;

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
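/*
 * Illustrative aside (standalone sketch, not part of the original driver):
 * seqiv's decrypt path above peels the 8-byte sequence-number IV off the
 * front of the ciphertext and counts it as associated data for the inner
 * request. The helper below, with a hypothetical name, shows just that
 * length bookkeeping.
 */
static int seqiv_inner_lengths(unsigned int assoclen, unsigned int cryptlen,
			       unsigned int authsize,
			       unsigned int *inner_assoclen,
			       unsigned int *inner_cryptlen)
{
	const unsigned int ivsize = 8;	/* seqiv always uses an 8-byte IV */

	if (cryptlen < ivsize + authsize)
		return -1;		/* mirrors the -EINVAL check above */

	/* the IV bytes move from the data stream into the associated data */
	*inner_cryptlen = cryptlen - ivsize;
	*inner_assoclen = assoclen + ivsize;
	return 0;
}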
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
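/*
 * Illustrative aside (standalone sketch, not driver code): the
 * cpu_to_be32(1) appended above forms GCM's initial counter block J0 for
 * a 96-bit IV, J0 = IV || 0x00000001, per NIST SP 800-38D.
 */
#include <stdint.h>
#include <string.h>

static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[12])
{
	memcpy(j0, iv, 12);	/* 96-bit IV fills the first 12 bytes */
	j0[12] = 0;		/* 32-bit big-endian block counter ... */
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;		/* ... starting at 1 */
}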
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
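/*
 * Illustrative aside (standalone sketch): the PTR_ALIGN idiom above rounds
 * the request-context pointer up to the transform's alignment (alignmask
 * + 1, always a power of two). ptr_align_sketch() is a hypothetical
 * stand-in for the kernel macro, not kernel API.
 */
#include <stdint.h>

static void *ptr_align_sketch(void *p, unsigned long align)
{
	/* round up to the next multiple of a power-of-two alignment */
	return (void *)(((uintptr_t)p + align - 1) & ~(uintptr_t)(align - 1));
}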
void unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;

	/*HI3630++ DX: for SCCC bug*/
	//HI3630 if (!areq_ctx->mac_buf_dma_addr)
	if (areq_ctx->mac_buf_dma_addr != 0)
	/*HI3630--*/
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		/*HI3630++ DX: for SCCC bug*/
		//HI3630 if (!areq_ctx->ccm_iv0_dma_addr)
		if (areq_ctx->ccm_iv0_dma_addr != 0)
		/*HI3630--*/
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		/* Note: the address of an embedded member is never NULL,
		 * so this check is always true.
		 */
		if (&areq_ctx->ccm_adata_sg != NULL)
			dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				     DMA_TO_DEVICE);
	}
	/*HI3630++ DX: for SCCC bug*/
	//HI3630 if (!areq_ctx->gen_ctx.iv_dma_addr)
	if (areq_ctx->gen_ctx.iv_dma_addr != 0)
	/*HI3630--*/
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);

	/* In case a pool was set, a table was allocated and
	 * should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		DX_LOG_DEBUG("free MLLI buffer: dma=0x%08lX virt=0x%08X\n",
			     (unsigned long)areq_ctx->mlli_params.mlli_dma_addr,
			     (uint32_t)areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}
	if (areq_ctx->assoc_dma_buf_type != DX_DMA_BUF_NULL) {
		DX_LOG_DEBUG("Unmapping sg assoc: req->assoc=0x%08lX\n",
			     (unsigned long)sg_virt(req->assoc));
		dma_unmap_sg(dev, req->assoc, areq_ctx->assoc_nents,
			     DMA_TO_DEVICE);
	}

	DX_LOG_DEBUG("Unmapping sg src: req->src=0x%08lX\n",
		     (unsigned long)sg_virt(req->src));
	dma_unmap_sg(dev, req->src, areq_ctx->in_nents, DMA_BIDIRECTIONAL);

	if (unlikely(req->src != req->dst)) {
		DX_LOG_DEBUG("Unmapping sg dst: req->dst=0x%08lX\n",
			     (unsigned long)sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->out_nents,
			     DMA_BIDIRECTIONAL);
	}
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);

	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = req->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
static void nitrox_aead_callback(void *arg, int err)
{
	struct aead_request *areq = arg;
	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);

	free_src_sglist(&rctx->nkreq);
	free_dst_sglist(&rctx->nkreq);
	if (err) {
		pr_err_ratelimited("request failed status 0x%0x\n", err);
		err = -EINVAL;
	}

	areq->base.complete(&areq->base, err);
}
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
			LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
				curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
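/*
 * Illustrative aside (standalone sketch): by the time the byte loop above
 * runs, the DMA-out callback has already XOR-ed the hardware-computed tag
 * with the expected one, so the message is authentic iff every byte of
 * rctx->auth_tag is zero. The same check, with a hypothetical name and an
 * accumulator instead of an early exit:
 */
#include <stdint.h>

static int tag_xor_is_zero(const uint8_t *tag, unsigned int len)
{
	uint8_t acc = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		acc |= tag[i];

	return acc == 0;	/* 1: authentic, 0: report -EBADMSG */
}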
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
	kzfree(subreq->iv);
}
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	struct scatterlist *sg;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;

	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);

	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (req->src != req->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, rctx->src,
			       req->src == req->dst ? rctx->src : rctx->dst,
			       req->cryptlen, iv);
	aead_request_set_ad(subreq, req->assoclen - 8);

	return subreq;
}
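/*
 * Illustrative aside (standalone sketch): the counter-block prefix built
 * above follows RFC 4309. With an 11-byte CCM nonce (3-byte salt plus
 * 8-byte explicit IV) the length field L is 4 bytes, so the flags byte is
 * L - 1 = 3; the trailing 4 bytes are left for the block counter.
 * rfc4309_fill_iv() is a hypothetical helper, not kernel API.
 */
#include <stdint.h>
#include <string.h>

static void rfc4309_fill_iv(uint8_t iv[16], const uint8_t salt[3],
			    const uint8_t explicit_iv[8])
{
	iv[0] = 3;			/* L' = L - 1 */
	memcpy(iv + 1, salt, 3);	/* implicit salt, fixed at setkey */
	memcpy(iv + 4, explicit_iv, 8);	/* per-packet IV from the wire */
	memset(iv + 12, 0, 4);		/* counter, filled by the CCM core */
}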
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;

		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
	aead_request_set_assoc(subreq, req->assoc, req->assoclen);

	return subreq;
}
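/*
 * Illustrative aside (standalone sketch): RFC 4106 builds the 12-byte GCM
 * nonce from a 4-byte salt fixed at setkey time plus the 8-byte per-packet
 * IV, exactly as the two memcpy calls above do. rfc4106_fill_iv() is a
 * hypothetical helper, not kernel API.
 */
#include <stdint.h>
#include <string.h>

static void rfc4106_fill_iv(uint8_t iv[12], const uint8_t salt[4],
			    const uint8_t explicit_iv[8])
{
	memcpy(iv, salt, 4);		/* implicit part, from the key */
	memcpy(iv + 4, explicit_iv, 8);	/* explicit part, from the packet */
}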
static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *)aead_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child =
			cryptd_aead_child(ctx->cryptd_tfm);

		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
			       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->geniv.lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));
	crypto_put_default_rng();
	if (err)
		goto out;

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->geniv.child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
}

static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	if (alg->base.cra_aead.encrypt)
		goto done;

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}