int ss_rng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	ss_aes_ctx_t *ctx = crypto_rng_ctx(tfm);

	SS_DBG("flow = %d, rdata = %p, len = %d\n", ctx->comm.flow, rdata, dlen);
	if (ss_dev->suspend) {
		SS_ERR("SS has already been suspended.\n");
		return -EAGAIN;
	}

	ss_dev_lock();

	/* Must set the seed addr in PRNG/TRNG. */
	ss_key_set(ctx->key, ctx->key_size);
	dma_map_single(&ss_dev->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV);

	ret = ss_rng_start(ctx, rdata, dlen);
	ss_dev_unlock();

	SS_DBG("Got %d bytes of random data.\n", ret);

	dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key),
			ctx->key_size, DMA_MEM_TO_DEV);

	return ret;
}
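/*
 * Illustrative sketch (not part of the driver): ss_rng_get_random() is the
 * backend of a crypto_rng transform, so a consumer would normally reach it
 * through the kernel crypto RNG API rather than calling it directly.  The
 * algorithm name "prng" and the seed length below are assumptions; use
 * whatever this driver actually registers.
 */
static int example_get_hw_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	u8 seed[24] = { 0 };	/* seed size is an assumption */
	int ret;

	rng = crypto_alloc_rng("prng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Seed the PRNG first; the driver stores the seed as its "key". */
	ret = crypto_rng_reset(rng, seed, sizeof(seed));
	if (ret == 0)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}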
int ss_hash_final(struct ahash_request *req)
{
	int pad_len = 0;
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct scatterlist last = {0}; /* sg entry for the padding data */

	if (req->result == NULL) {
		SS_ERR("Invalid result pointer.\n");
		return -EINVAL;
	}
	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);

	if (ss_dev->suspend) {
		SS_ERR("SS has already been suspended.\n");
		return -EAGAIN;
	}

	/* Process the padding data. */
	pad_len = ss_hash_padding(ctx, req_ctx->type == SS_METHOD_MD5 ? 0 : 1);
	SS_DBG("Pad len: %d\n", pad_len);
	req_ctx->dma_src.sg = &last;
	sg_init_table(&last, 1);
	sg_set_buf(&last, ctx->pad, pad_len);
	SS_DBG("Padding data:\n");
	print_hex(ctx->pad, 128, (int)ctx->pad);

	ss_dev_lock();

	ss_hash_start(ctx, req_ctx, pad_len);
	ss_sha_final();

	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);

	ss_check_sha_end();
	memcpy(req->result, ctx->md, ctx->md_size);
	ss_ctrl_stop();
	ss_dev_unlock();

#ifdef SS_SHA_SWAP_FINAL_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(req->result, ctx->md_size);
#endif

	return 0;
}
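/*
 * Illustrative sketch (not part of the driver): ss_hash_final() is the
 * .final hook of an ahash algorithm, so the usual way to exercise it is
 * through the ahash API.  A one-shot digest is shown; the "md5" name
 * assumes the driver registers an MD5 implementation, and asynchronous
 * completion (-EINPROGRESS) is ignored for brevity.
 */
static int example_md5_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* init + update + final in one call; the backend pads and copies the digest. */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}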
static int sunxi_ss_suspend(struct device *dev)
{
#ifdef CONFIG_EVB_PLATFORM
	struct platform_device *pdev = to_platform_device(dev);
	sunxi_ss_t *sss = platform_get_drvdata(pdev);
	unsigned long flags = 0;

	SS_ENTER();

	/* Wait for the completion of the current SS operation. */
	ss_dev_lock();

	spin_lock_irqsave(&sss->lock, flags);
	sss->suspend = 1;
	spin_unlock_irqrestore(&sss->lock, flags);

	sunxi_ss_hw_exit(sss);
	ss_dev_unlock();
#endif
	return 0;
}
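/*
 * Illustrative sketch (not part of the driver): suspend handlers like
 * sunxi_ss_suspend() are typically wired into the platform driver through a
 * dev_pm_ops table.  The matching sunxi_ss_resume() callback and the
 * "sunxi-ss" driver name are assumptions about the rest of this file.
 */
static const struct dev_pm_ops example_sunxi_ss_pm_ops = {
	.suspend = sunxi_ss_suspend,
	.resume  = sunxi_ss_resume,
};

/*
 * Referenced from the platform driver definition, roughly:
 *
 *	static struct platform_driver sunxi_ss_driver = {
 *		.driver = {
 *			.name = "sunxi-ss",
 *			.pm   = &example_sunxi_ss_pm_ops,
 *		},
 *		...
 *	};
 */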
int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;

	SS_ENTER();
	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key to be reloaded on every decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n",
			req->info, crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(req->info, crypto_ablkcipher_ivsize(tfm));
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n",
			req->info, crypto_ablkcipher_ivsize(tfm));
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));

		SS_DBG("CNT: %08x %08x %08x %08x\n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
	}
#endif

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif
	ctx->cnt += req->nbytes;

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}

irqreturn_t sunxi_ss_irq_handler(int irq, void *dev_id)
{
	sunxi_ss_t *sss = (sunxi_ss_t *)dev_id;
	unsigned long flags = 0;
	int pending = 0;

	spin_lock_irqsave(&sss->lock, flags);
	pending = ss_pending_get();
	SS_DBG("SS pending %#x\n", pending);
	spin_unlock_irqrestore(&sss->lock, flags);

	return IRQ_HANDLED;
}
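/*
 * Illustrative sketch (not part of the driver): sunxi_ss_irq_handler() only
 * reads and logs the pending status in this listing, but it still has to be
 * registered against the SS interrupt line during probe.  The interrupt
 * index and the "sunxi-ss" label are assumptions.
 */
static int example_register_ss_irq(struct platform_device *pdev, sunxi_ss_t *sss)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* dev_id is the sunxi_ss_t instance, matching the cast in the handler. */
	return devm_request_irq(&pdev->dev, irq, sunxi_ss_irq_handler, 0,
				"sunxi-ss", sss);
}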
static int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;
	int key_map_flag = 0;
	int iv_map_flag = 0;

	SS_ENTER();
	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key to be reloaded on every decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		dma_map_single(&sss->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV);
		key_map_flag = 1;
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n",
			req->info, crypto_ablkcipher_ivsize(tfm));
		memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		dma_map_single(&sss->pdev->dev, ctx->iv,
				crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);
		iv_map_flag = 1;
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n",
			req->info, crypto_ablkcipher_ivsize(tfm));
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));

		SS_DBG("CNT: %08x %08x %08x %08x\n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		dma_map_single(&sss->pdev->dev, ctx->iv,
				crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);
		iv_map_flag = 1;
	}
#endif

	if (req_ctx->type == SS_METHOD_RSA)
		ss_rsa_width_set(crypto_ablkcipher_ivsize(tfm));

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	if (key_map_flag == 1)
		dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key),
				ctx->key_size, DMA_MEM_TO_DEV);
	if (iv_map_flag == 1)
		dma_unmap_single(&sss->pdev->dev, virt_to_phys(ctx->iv),
				crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x\n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif
	ctx->cnt += req->nbytes;

	return ret;
}

static int ss_hash_one_req(sunxi_ss_t *sss, struct ahash_request *req)
{
	int ret = 0;
	ss_aes_req_ctx_t *req_ctx = NULL;
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	SS_ENTER();
	if (!req->src) {
		SS_ERR("Invalid sg: src = %p\n", req->src);
		return -EINVAL;
	}

	ss_dev_lock();

	req_ctx = ahash_request_ctx(req);
	req_ctx->dma_src.sg = req->src;

	ss_hash_padding_data_prepare(ctx, req->result, req->nbytes);

	ret = ss_hash_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_hash_start fail(%d)\n", ret);

	ss_dev_unlock();

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}
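/*
 * Illustrative sketch (not part of the driver): ss_aes_one_req() is reached
 * through the legacy ablkcipher request path, which matches the kernel
 * generation this driver targets.  A minimal CBC-encrypt caller is shown;
 * the "cbc(aes)" name assumes the driver's registration, and asynchronous
 * completion (-EINPROGRESS) is ignored for brevity.
 */
static int example_aes_cbc_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int nbytes,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ablkcipher_request_set_callback(req, 0, NULL, NULL);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* May return -EINPROGRESS for an async backend; a real caller waits. */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}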