static int ss_sha1_init(struct ahash_request *req) { int iv[SHA1_DIGEST_SIZE/4] = {SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4}; #ifdef SS_SHA_SWAP_PRE_ENABLE #ifdef SS_SHA_NO_SWAP_IV4 ss_hash_swap((char *)iv, SHA1_DIGEST_SIZE - 4); #else ss_hash_swap((char *)iv, SHA1_DIGEST_SIZE); #endif #endif return ss_hash_init(req, SS_METHOD_SHA1, SHA1_DIGEST_SIZE, (char *)iv); }
static int ss_sha256_init(struct ahash_request *req) { int iv[SHA256_DIGEST_SIZE/4] = {SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}; #ifdef SS_SHA_SWAP_PRE_ENABLE ss_hash_swap((char *)iv, SHA256_DIGEST_SIZE); #endif return ss_hash_init(req, SS_METHOD_SHA256, SHA256_DIGEST_SIZE, (char *)iv); }
static int ss_sha512_init(struct ahash_request *req) { int iv[SHA512_DIGEST_SIZE/4] = {GET_U64_HIGH(SHA512_H0), GET_U64_LOW(SHA512_H0), GET_U64_HIGH(SHA512_H1), GET_U64_LOW(SHA512_H1), GET_U64_HIGH(SHA512_H2), GET_U64_LOW(SHA512_H2), GET_U64_HIGH(SHA512_H3), GET_U64_LOW(SHA512_H3), GET_U64_HIGH(SHA512_H4), GET_U64_LOW(SHA512_H4), GET_U64_HIGH(SHA512_H5), GET_U64_LOW(SHA512_H5), GET_U64_HIGH(SHA512_H6), GET_U64_LOW(SHA512_H6), GET_U64_HIGH(SHA512_H7), GET_U64_LOW(SHA512_H7)}; #ifdef SS_SHA_SWAP_PRE_ENABLE ss_hash_swap((char *)iv, SHA512_DIGEST_SIZE); #endif return ss_hash_init(req, SS_METHOD_SHA512, SHA512_DIGEST_SIZE, (char *)iv); }
int ss_hash_final(struct ahash_request *req) { int pad_len = 0; ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req); ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct scatterlist last = {0}; /* make a sg struct for padding data. */ if (req->result == NULL) { SS_ERR("Invalid result porinter. \n"); return -EINVAL; } SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt); if (ss_dev->suspend) { SS_ERR("SS has already suspend. \n"); return -EAGAIN; } /* Process the padding data. */ pad_len = ss_hash_padding(ctx, req_ctx->type == SS_METHOD_MD5 ? 0 : 1); SS_DBG("Pad len: %d \n", pad_len); req_ctx->dma_src.sg = &last; sg_init_table(&last, 1); sg_set_buf(&last, ctx->pad, pad_len); SS_DBG("Padding data: \n"); print_hex(ctx->pad, 128, (int)ctx->pad); ss_dev_lock(); ss_hash_start(ctx, req_ctx, pad_len); ss_sha_final(); SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt); ss_check_sha_end(); memcpy(req->result, ctx->md, ctx->md_size); ss_ctrl_stop(); ss_dev_unlock(); #ifdef SS_SHA_SWAP_FINAL_ENABLE if (req_ctx->type != SS_METHOD_MD5) ss_hash_swap(req->result, ctx->md_size); #endif return 0; }
/* Run one hash pass on the engine: DMA the source data into the flow's
 * bounce buffer, load the running digest (ctx->md) as the IV, kick the
 * hardware, and copy the resulting digest back into ctx->md.
 *
 * Fixes over the previous version:
 *  - all error paths now release the flow IRQ, the DMA channel and the
 *    DMA mappings (the old code leaked them on -EBUSY and on both timeouts);
 *  - the unmap of ctx->md now uses the handle returned by dma_map_single()
 *    instead of virt_to_phys(), which is not guaranteed to be the DMA address;
 *  - a ss_dma_src_config() failure is propagated instead of being silently
 *    swallowed, and ctx->cnt is only advanced when the data was hashed.
 *
 * Returns 0 on success, -EBUSY if the DMA channel cannot be prepared,
 * -ETIMEDOUT on a DMA or engine timeout, or the ss_dma_src_config() error.
 */
static int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;
	dma_addr_t md_dma = 0;	/* handle from dma_map_single(ctx->md) */
	int md_map_flag = 0;

	/* Prepare the hardware flow. */
	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);
	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n",
	       flow, req_ctx->dir, req_ctx->type, req_ctx->mode, len, ctx->cnt);
	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);

	/* Load the current digest state as the IV so multi-update hashing chains. */
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);
	if (ss_dma_prepare(&req_ctx->dma_src)) {
		ret = -EBUSY;
		goto err_irq;
	}

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret != 0)
		goto err_release;

	/* 1. Copy data from user space to ss_dev->flows[flow].buf_src. */
	ss_dma_start(&req_ctx->dma_src);
	ret = wait_for_completion_timeout(&req_ctx->done,
					  msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ret = -ETIMEDOUT;
		goto err_release;
	}

	/* 2. Start the SS. Only whole SHA1_BLOCK_SIZE blocks are fed, in words. */
	ss_data_src_set(ss_dev->flows[flow].buf_src_dma);
	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
	SS_DBG("ss_dev->buf_dst_dma = %#x\n", ss_dev->flows[flow].buf_dst_dma);
	ss_data_len_set((len - len%SHA1_BLOCK_SIZE)/4);
#ifdef SS_SHA_SWAP_MID_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(ctx->md, ctx->md_size);
#endif
	md_dma = dma_map_single(&ss_dev->pdev->dev, ctx->md, ctx->md_size,
				DMA_MEM_TO_DEV);
	md_map_flag = 1;

	SS_DBG("Before SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
	dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst,
		       ctx->md_size, DMA_DEV_TO_MEM);
	ss_ctrl_start();

	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done,
					  msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		ret = -ETIMEDOUT;
		goto err_release;
	}
	ret = 0;

	SS_DBG("After SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
	SS_DBG("After SS, dst data: \n");
	print_hex(ss_dev->flows[flow].buf_dst, 32, (int)ss_dev->flows[flow].buf_dst);

	/* 3. Copy the MD from ss_dev->flows[flow].buf_dst back into ctx->md. */
	memcpy(ctx->md, ss_dev->flows[flow].buf_dst, ctx->md_size);
	ctx->cnt += len;	/* count only data that was actually hashed */

err_release:
	ss_ctrl_stop();
	if (md_map_flag == 1) {
		dma_unmap_single(&ss_dev->pdev->dev,
				 ss_dev->flows[flow].buf_dst_dma,
				 ctx->md_size, DMA_DEV_TO_MEM);
		dma_unmap_single(&ss_dev->pdev->dev, md_dma,
				 ctx->md_size, DMA_MEM_TO_DEV);
	}
	ss_dma_release(ss_dev, &req_ctx->dma_src);
err_irq:
	ss_irq_disable(flow);
	return ret;
}