/*
 * Crypto-transform destructor: give the SS flow bound to this tfm back to
 * the pool when the transform is torn down.
 */
static void sunxi_ss_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	SS_ENTER();
	ss_flow_release(ctx);

	/* sun8iw6/sun9iw1 require an SS controller reset after each operation. */
#ifdef SS_IDMA_ENABLE
	ss_reset();
#endif
}
/*
 * Post-write hook for the SBI_CTRL register: a write with SOFT_RST set
 * resets the block, and the bit self-clears in the register file.
 */
static void sbi_ctrl_postw(DepRegisterInfo *reg, uint64_t val64)
{
	SlaveBootInt *s = SBI(reg->opaque);
	const uint32_t ctrl = (uint32_t)val64;

	if (ctrl & R_SBI_CTRL_SOFT_RST_MASK) {
		ss_reset(DEVICE(s));
		/* SOFT_RST is write-one-to-trigger; clear it back to 0. */
		DEP_AF_DP32(s->regs, SBI_CTRL, SOFT_RST, 0);
	}

	/* Reflect the (possibly changed) state on the busy line. */
	ss_update_busy_line(s);
}
/*
 * Run the PRNG/TRNG through flow @flow and copy @dlen bytes of random data
 * into @rdata.  Returns @dlen on success, -ETIMEDOUT if the engine does not
 * signal completion in time.
 *
 * Fixes vs. previous version:
 *  - the destination buffer is now unmapped BEFORE the CPU reads it
 *    (memcpy on a live DMA_DEV_TO_MEM mapping is not coherent), and the
 *    unmap also runs on the timeout path so the mapping is never leaked;
 *  - the flow IRQ and (for TRNG) the oscillator are also shut down on the
 *    timeout path;
 *  - the alignment debug print matches the TRNG (32-byte) build.
 */
static int ss_rng_start(ss_aes_ctx_t *ctx, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG) {
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_TRNG);
		ss_trng_osc_enable();
	} else
#endif
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_PRNG);

	ss_rng_mode_set(SS_RNG_MODE_CONTINUE);
	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
#ifdef SS_TRNG_ENABLE
	ss_data_len_set(DIV_ROUND_UP(dlen, 32)*(32>>2)); /* align with 32 Bytes */
	SS_DBG("Flow: %d, Request: %d, Aligned: %d \n",
			flow, dlen, DIV_ROUND_UP(dlen, 32)*8);
#else
	ss_data_len_set(DIV_ROUND_UP(dlen, 20)*(20>>2)); /* align with 20 Bytes */
	SS_DBG("Flow: %d, Request: %d, Aligned: %d \n",
			flow, dlen, DIV_ROUND_UP(dlen, 20)*5);
#endif

	/* NOTE(review): the handle returned by dma_map_single() is discarded and
	 * the precomputed buf_dst_dma is used instead — presumably they are
	 * identical for this coherent buffer; confirm against the allocation. */
	dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst,
			SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);

	ss_ctrl_start();

	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done,
			msecs_to_jiffies(SS_WAIT_TIME));

	/* Unmap before any CPU access (coherency) and before any early return
	 * (no leaked mapping on timeout). */
	dma_unmap_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst_dma,
			SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);

	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_irq_disable(flow);
#ifdef SS_TRNG_ENABLE
		if (ctx->comm.flags & SS_FLAG_TRNG)
			ss_trng_osc_disable();
#endif
		ss_reset();
		return -ETIMEDOUT;
	}

	memcpy(rdata, ss_dev->flows[flow].buf_dst, dlen);

	ss_irq_disable(flow);
	ret = dlen;

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG)
		ss_trng_osc_disable();
#endif
	ss_ctrl_stop();
	return ret;
}
/*
 * Feed @len bytes through the hash engine on @ctx's flow via IDMA and read
 * the intermediate digest back into ctx->md.  Returns 0 on success (or when
 * the source DMA config fails, preserving historical behaviour), -EBUSY if
 * no DMA channel is available, -ETIMEDOUT on engine timeout.
 *
 * Fix vs. previous version: the timeout path used to return without
 * ss_dma_disable()/ss_dma_release(), leaking the DMA channel and leaving
 * the flow's DMA enabled.  All exits now funnel through one cleanup path.
 */
int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int err = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();
	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n",
			flow, req_ctx->dir, req_ctx->type, req_ctx->mode,
			len, ctx->cnt);
	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);
	if (ss_dma_prepare(&req_ctx->dma_src)) {
		/* Undo the flow-DMA enable we did above before bailing out. */
		ss_dma_disable(flow);
		return -EBUSY;
	}

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret == 0) {
		ss_ctrl_start();
		ss_dma_start(&req_ctx->dma_src);

		ret = wait_for_completion_timeout(&req_ctx->done,
				msecs_to_jiffies(SS_WAIT_TIME));
		if (ret == 0) {
			SS_ERR("Timed out\n");
			ss_reset();
			err = -ETIMEDOUT;
			goto out;	/* release the DMA channel on failure too */
		}
		ss_md_get(ctx->md, NULL, ctx->md_size);
	}

out:
	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	if (err)
		return err;

	ctx->cnt += len;
	return 0;
}
/*
 * Run one AES request of @len bytes through @ctx's flow using paired
 * src/dst IDMA channels.  Returns 0 on success, -EBUSY if a DMA channel
 * cannot be obtained, -ETIMEDOUT on engine timeout.
 *
 * Fixes vs. previous version:
 *  - the return value of ss_dma_prepare(&req_ctx->dma_dst) was ignored
 *    (the src one was checked) — now checked, and the src channel is
 *    released if the dst prepare fails;
 *  - the timeout path used to return while both DMA channels were still
 *    held and the flow still enabled — all exits now share one cleanup.
 */
static int ss_aes_start(ss_aes_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int err = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();
	ss_method_set(req_ctx->dir, req_ctx->type);
	ss_aes_mode_set(req_ctx->mode);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d \n",
			flow, req_ctx->dir, req_ctx->type, req_ctx->mode, len);
	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src)) {
		ss_dma_disable(flow);
		return -EBUSY;
	}
	if (ss_dma_prepare(&req_ctx->dma_dst)) {
		/* Don't leak the already-acquired src channel. */
		ss_dma_release(ss_dev, &req_ctx->dma_src);
		ss_dma_disable(flow);
		return -EBUSY;
	}

	ss_dma_src_config(ss_dev, ctx, req_ctx, len, 0);
	ss_dma_dst_config(ss_dev, ctx, req_ctx, len, 1);

	/* Arm the destination first so it is ready when the engine starts. */
	ss_dma_start(&req_ctx->dma_dst);
	ss_ctrl_start();
	ss_dma_start(&req_ctx->dma_src);

	ret = wait_for_completion_timeout(&req_ctx->done,
			msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		err = -ETIMEDOUT;
	} else {
		ss_ctrl_stop();
	}

	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	ss_dma_release(ss_dev, &req_ctx->dma_dst);
	return err;
}
/*
 * Hash @len bytes on @ctx's flow using the bounce-buffer path:
 *   1. DMA the user data into flows[flow].buf_src,
 *   2. run the SS engine buf_src -> buf_dst,
 *   3. copy the digest from buf_dst back into ctx->md.
 * Returns 0 on success (including when the source DMA config fails,
 * preserving historical behaviour), -EBUSY if no DMA channel is available,
 * -ETIMEDOUT on either timeout.
 *
 * Fixes vs. previous version:
 *  - both timeout paths used to return immediately, leaking the DMA
 *    channel, the two dma_map_single() mappings, the enabled IRQ and a
 *    running controller — all exits now go through one cleanup path;
 *  - the digest memcpy now happens AFTER the DMA_DEV_TO_MEM unmap, so the
 *    CPU reads a coherent buffer.
 */
static int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int err = 0;
	int flow = ctx->comm.flow;
	int md_map_flag = 0;
	int hash_done = 0;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);
	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n",
			flow, req_ctx->dir, req_ctx->type, req_ctx->mode,
			len, ctx->cnt);
	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);
	if (ss_dma_prepare(&req_ctx->dma_src)) {
		ss_irq_disable(flow);
		return -EBUSY;
	}

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret != 0)
		goto out;	/* historical: still returns 0, cnt still advances */

	/* 1. Copy data from user space to ss_dev->flows[flow].buf_src. */
	ss_dma_start(&req_ctx->dma_src);
	ret = wait_for_completion_timeout(&req_ctx->done,
			msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		err = -ETIMEDOUT;
		goto out;
	}

	/* 2. Start the SS. */
	ss_data_src_set(ss_dev->flows[flow].buf_src_dma);
	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
	SS_DBG("ss_dev->buf_dst_dma = %#x\n", ss_dev->flows[flow].buf_dst_dma);
	/* Whole blocks only; length register is in 32-bit words. */
	ss_data_len_set((len - len%SHA1_BLOCK_SIZE)/4);
#ifdef SS_SHA_SWAP_MID_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(ctx->md, ctx->md_size);
#endif

	dma_map_single(&ss_dev->pdev->dev, ctx->md, ctx->md_size,
			DMA_MEM_TO_DEV);
	md_map_flag = 1;
	SS_DBG("Before SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
	dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst,
			ctx->md_size, DMA_DEV_TO_MEM);
	ss_ctrl_start();

	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done,
			msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		err = -ETIMEDOUT;
		goto out;
	}

	SS_DBG("After SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
	SS_DBG("After SS, dst data: \n");
	print_hex(ss_dev->flows[flow].buf_dst, 32,
			(int)ss_dev->flows[flow].buf_dst);
	hash_done = 1;

out:
	ss_ctrl_stop();
	ss_irq_disable(flow);
	if (md_map_flag == 1) {
		dma_unmap_single(&ss_dev->pdev->dev,
				ss_dev->flows[flow].buf_dst_dma,
				ctx->md_size, DMA_DEV_TO_MEM);
		dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->md),
				ctx->md_size, DMA_MEM_TO_DEV);
	}
	ss_dma_release(ss_dev, &req_ctx->dma_src);

	if (err)
		return err;

	/* 3. Copy the MD from buf_dst to ctx->md — after the unmap above so
	 * the read is coherent. */
	if (hash_done)
		memcpy(ctx->md, ss_dev->flows[flow].buf_dst, ctx->md_size);

	ctx->cnt += len;
	return 0;
}
/*
 * Run one AES request of @len bytes on @ctx's flow using the bounce-buffer
 * path: DMA in to buf_src, run the engine, DMA out of buf_dst.
 * Returns 0 on success, -EBUSY if a DMA channel cannot be obtained,
 * -ETIMEDOUT on any of the three timeouts.
 *
 * Fixes vs. previous version:
 *  - every timeout return used to leak the src (and sometimes dst) DMA
 *    channel, leave the flow IRQ enabled and the controller running;
 *  - the dst-prepare failure used to leak the src channel.
 * All exits now funnel through one cleanup path.
 */
static int ss_aes_start(ss_aes_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int err = 0;
	int flow = ctx->comm.flow;
	int dst_prepared = 0;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);
	ss_method_set(req_ctx->dir, req_ctx->type);
	ss_aes_mode_set(req_ctx->mode);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d \n",
			flow, req_ctx->dir, req_ctx->type, req_ctx->mode, len);
	init_completion(&req_ctx->done);

	/* 1. Copy data from user space to ss_dev->flows[flow].buf_src. */
	if (ss_dma_prepare(&req_ctx->dma_src)) {
		ss_irq_disable(flow);
		return -EBUSY;
	}

#ifdef SS_CTR_MODE_ENABLE
	/* CTR works on whole blocks: zero-pad the tail of a partial block. */
	if ((req_ctx->mode == SS_AES_MODE_CTR) && ((len%AES_BLOCK_SIZE) != 0))
		memset(&ss_dev->flows[flow].buf_src[len], 0, AES_BLOCK_SIZE);
#endif
	ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	ss_dma_start(&req_ctx->dma_src);
	ret = wait_for_completion_timeout(&req_ctx->done,
			msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		err = -ETIMEDOUT;
		goto out;
	}

	/* 2. Start the SS. */
	ss_data_src_set(ss_dev->flows[flow].buf_src_dma);
	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
	SS_DBG("ss_dev->buf_dst_dma = %#x\n", ss_dev->flows[flow].buf_dst_dma);

#ifdef SS_CTS_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTS) {
		ss_data_len_set(len);
		if (len < SZ_4K) /* A bad way to determine the last packet of CTS mode. */
			ss_cts_last();
	} else
#endif
		ss_data_len_set(DIV_ROUND_UP(len, AES_BLOCK_SIZE)*4);

	ss_ctrl_start();
	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done,
			msecs_to_jiffies(SS_WAIT_TIME*50));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		err = -ETIMEDOUT;
		goto out;
	}

	/* 3. Copy the result from ss_dev->flows[flow].buf_dst to user space. */
	if (ss_dma_prepare(&req_ctx->dma_dst)) {
		err = -EBUSY;
		goto out;
	}
	dst_prepared = 1;
	ss_dma_dst_config(ss_dev, ctx, req_ctx, len, 1);
	ss_dma_start(&req_ctx->dma_dst);
	ret = wait_for_completion_timeout(&req_ctx->done,
			msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		err = -ETIMEDOUT;
	}

out:
	ss_ctrl_stop();
	ss_irq_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	if (dst_prepared)
		ss_dma_release(ss_dev, &req_ctx->dma_dst);
	return err;
}