int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;

	SS_ENTER();

	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key to be reloaded on every decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n", req->info,
			crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(req->info, crypto_ablkcipher_ivsize(tfm));
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n", req->info,
			crypto_ablkcipher_ivsize(tfm));
		/* Load the initial counter only on the first request of a stream. */
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n",
			*(int *)&ctx->iv[0], *(int *)&ctx->iv[4],
			*(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
	}
#endif

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();

#ifdef SS_CTR_MODE_ENABLE
	/* Read back the advanced counter so the next request continues the stream. */
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n",
			*(int *)&ctx->iv[0], *(int *)&ctx->iv[4],
			*(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif

	ctx->cnt += req->nbytes;

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}

irqreturn_t sunxi_ss_irq_handler(int irq, void *dev_id)
{
	sunxi_ss_t *sss = (sunxi_ss_t *)dev_id;
	unsigned long flags = 0;
	int pending = 0;

	/* Snapshot the pending status under the lock; it is only logged here. */
	spin_lock_irqsave(&sss->lock, flags);
	pending = ss_pending_get();
	SS_DBG("SS pending %#x\n", pending);
	spin_unlock_irqrestore(&sss->lock, flags);

	return IRQ_HANDLED;
}
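/*
 * Sketch only (not live code): how sunxi_ss_irq_handler() might be wired up
 * at probe time.  The "irq" and "pdev" members of sunxi_ss_t are assumptions
 * for illustration; the driver's actual probe path may differ.  Requires
 * <linux/interrupt.h>.
 */
#if 0
static int sunxi_ss_setup_irq(sunxi_ss_t *sss)
{
	int ret;

	/* Pass the device context as dev_id so the handler can recover it. */
	ret = devm_request_irq(&sss->pdev->dev, sss->irq, sunxi_ss_irq_handler,
			       0, "sunxi-ss", sss);
	if (ret != 0)
		SS_ERR("devm_request_irq fail(%d)\n", ret);
	return ret;
}
#endif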
static int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;
	dma_addr_t key_dma = 0;
	dma_addr_t iv_dma = 0;
	int key_map_flag = 0;
	int iv_map_flag = 0;

	SS_ENTER();

	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key to be reloaded on every decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		/* dma_map_single() expects a dma_data_direction (DMA_TO_DEVICE);
		 * keep the returned handle for the matching unmap. */
		key_dma = dma_map_single(&sss->pdev->dev, ctx->key,
					 ctx->key_size, DMA_TO_DEVICE);
		key_map_flag = 1;
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n", req->info,
			crypto_ablkcipher_ivsize(tfm));
		memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		iv_dma = dma_map_single(&sss->pdev->dev, ctx->iv,
					crypto_ablkcipher_ivsize(tfm), DMA_TO_DEVICE);
		iv_map_flag = 1;
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n", req->info,
			crypto_ablkcipher_ivsize(tfm));
		/* Load the initial counter only on the first request of a stream. */
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n",
			*(int *)&ctx->iv[0], *(int *)&ctx->iv[4],
			*(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		iv_dma = dma_map_single(&sss->pdev->dev, ctx->iv,
					crypto_ablkcipher_ivsize(tfm), DMA_TO_DEVICE);
		iv_map_flag = 1;
	}
#endif

	if (req_ctx->type == SS_METHOD_RSA)
		ss_rsa_width_set(crypto_ablkcipher_ivsize(tfm));

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();

	if (key_map_flag == 1)
		dma_unmap_single(&sss->pdev->dev, key_dma, ctx->key_size,
				 DMA_TO_DEVICE);
	if (iv_map_flag == 1)
		dma_unmap_single(&sss->pdev->dev, iv_dma,
				 crypto_ablkcipher_ivsize(tfm), DMA_TO_DEVICE);

#ifdef SS_CTR_MODE_ENABLE
	/* Read back the advanced counter so the next request continues the stream. */
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n",
			*(int *)&ctx->iv[0], *(int *)&ctx->iv[4],
			*(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif

	ctx->cnt += req->nbytes;

	/* Complete the request only after the DMA mappings are released and
	 * the CTR counter has been read back. */
	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}

static int ss_hash_one_req(sunxi_ss_t *sss, struct ahash_request *req)
{
	int ret = 0;
	ss_aes_req_ctx_t *req_ctx = NULL;
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	SS_ENTER();

	if (!req->src) {
		SS_ERR("Invalid sg: src = %p\n", req->src);
		return -EINVAL;
	}

	ss_dev_lock();

	req_ctx = ahash_request_ctx(req);
	req_ctx->dma_src.sg = req->src;

	ss_hash_padding_data_prepare(ctx, req->result, req->nbytes);

	ret = ss_hash_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_hash_start fail(%d)\n", ret);

	ss_dev_unlock();

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}
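/*
 * Sketch only (not live code): a minimal in-kernel caller for the legacy
 * ablkcipher API that ss_aes_one_req() ultimately services, on kernels that
 * still provide crypto_alloc_ablkcipher.  The buffer names and sizes are
 * assumptions for illustration.  Requires <linux/crypto.h> and
 * <linux/scatterlist.h>.
 */
#if 0
static int sunxi_ss_cbc_sketch(void)
{
	u8 key[16] = { 0 }, iv[16] = { 0 }, buf[64] = { 0 };
	struct scatterlist sg;
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* In-place encryption: src and dst share one scatterlist. */
	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* May return -EINPROGRESS for a truly asynchronous implementation. */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif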