static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}
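/*
 * For context: the completion callback above forwards the return value of
 * qce_check_status() to async_req_done(). A minimal sketch of what such a
 * status check could look like is given below. The helper and register/bit
 * names (qce_read(), REG_STATUS, STATUS_ERRORS, OPERATION_DONE_SHIFT) are
 * assumptions for illustration, not a verbatim copy of the driver.
 */
static int qce_check_status_sketch(struct qce_device *qce, u32 *status)
{
	/*
	 * Read the live engine status register rather than the DMA result
	 * dump, since the dump may be stale if the operation never finished.
	 */
	*status = qce_read(qce, REG_STATUS);

	/* Fail if an error bit is set or completion never latched. */
	if ((*status & STATUS_ERRORS) || !(*status & BIT(OPERATION_DONE_SHIFT)))
		return -ENXIO;

	return 0;
}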
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries.\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns the mapped entry count; 0 means failure. */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
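/*
 * Note on the mapping calls above: dma_map_sg() returns the number of DMA
 * segments actually mapped and 0 on failure; it never returns a negative
 * errno, so the original "ret < 0" tests could never fire. The handler
 * therefore converts a zero return into -EIO. A hypothetical wrapper that
 * makes the convention explicit (qce_map_sg_checked is not a driver API):
 */
static int qce_map_sg_checked(struct device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sg, nents, dir);

	/* Zero mapped entries means the mapping failed. */
	return mapped ? mapped : -EIO;
}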
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	/* Unmap with the same API the request handler used to map. */
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
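/*
 * The callback above pulls the digest and running byte counts out of the
 * DMA-able result buffer. A sketch of a result-dump layout consistent with
 * the fields used above (auth_iv, auth_byte_count); the remaining fields
 * and the exact array sizes are assumptions about the hardware's dump
 * format, not the driver's authoritative definition.
 */
struct qce_result_dump_sketch {
	u32 auth_iv[16];	/* digest or intermediate hash state */
	u32 auth_byte_count[2];	/* running byte count, two 32-bit words */
	u32 encr_cntr_iv[4];	/* cipher counter/IV snapshot (assumed) */
	u32 status;		/* engine status word (assumed) */
	u32 status2;
};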
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries.\n");
		/* Propagate the negative errno as-is; do not negate it. */
		return rctx->dst_nents;
	}

	/* One extra entry for the result buffer appended below. */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns the mapped entry count; 0 means failure. */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
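/*
 * The handler above appends req->dst and the result-buffer entry to a single
 * destination table through qce_sgtable_add(), then calls sg_mark_end() on
 * the returned entry. A minimal sketch of such a helper, assuming it copies
 * new entries into the first unused (page-less) slots of the pre-allocated
 * table and returns the last entry written; the exact driver implementation
 * may differ.
 */
static struct scatterlist *
qce_sgtable_add_sketch(struct sg_table *sgt, struct scatterlist *new_sgl)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

	/* Skip slots that are already populated. */
	while (sg && sg_page(sg))
		sg = sg_next(sg);

	/* No free slot left: the table was sized too small. */
	if (!sg)
		return ERR_PTR(-EINVAL);

	/* Copy each new entry into a free slot. */
	while (new_sgl && sg) {
		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
			    new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
	}

	return sg_last;
}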