/*
 * Make a sg_table based on sg[] of crypto request.
 *
 * Builds a new scatterlist in @sgt whose entries point into the contiguous
 * buffer at @vbase, mirroring the per-segment lengths of the request
 * scatterlist @sg.  @len is the total byte count covered by @sg.
 * @pbase is currently unused by this function.
 *
 * Returns 0 on success, -ENOMEM if the sg table cannot be allocated.
 */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
			    int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
		/*
		 * Must bail out here: sgt->sgl is not valid after a failed
		 * sg_alloc_table(), so continuing would dereference a NULL
		 * (or stale) scatterlist pointer.
		 */
		return -ENOMEM;
	}

	/* Mirror each source segment's length into the new table. */
	dst_sg = sgt->sgl;
	for (i = 0; i < npages; i++) {
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
	return 0;
}
/*
 * Configure and submit the RX (memory -> SS device) DMA transfer.
 *
 * ctx - only used for HASH.
 * @sss:     driver state, provides the device and FIFO base address
 * @req_ctx: request context; its dma_src channel/sg list is used
 * @len:     total byte count to transfer
 * @cb:      when 1, install ss_dma_cb as the completion callback
 *
 * Returns 0 on success, 1 when a HASH request is too short to need DMA
 * (len < SHA1_BLOCK_SIZE), or a negative error code on failure.
 */
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx,
			     ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int ret = 0;
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	/* Memory-to-device: feed the SS RX FIFO, 4-byte beats, burst of 1. */
	info->dir = DMA_MEM_TO_DEV;
	dma_conf.direction = info->dir;
	dma_conf.dst_addr = sss->base_addr_phy + SS_REG_RXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SS, DRQSRC_SDRAM);
	ret = dmaengine_slave_config(info->chan, &dma_conf);
	if (ret != 0) {
		/* Don't map/submit against a misconfigured channel. */
		SS_ERR("dmaengine_slave_config() failed! ret = %d\n", ret);
		return ret;
	}

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n",
		npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}
	info->nents = nents;

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		ss_hash_padding_sg_prepare(&info->sg[nents-1], len);

		/* Total len is too small, so there is no data for DMA. */
		/* NOTE(review): sg stays mapped here — presumably the caller
		 * unmaps via info->nents on this path; confirm at call sites. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;
	}

	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
			info->dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
/*
 * Configure and submit the TX (SS device -> memory) DMA transfer.
 *
 * @sss:     driver state, provides the device and FIFO base address
 * @req_ctx: request context; its dma_dst channel/sg list is used
 * @len:     total byte count to transfer
 * @cb:      when 1, install ss_dma_cb as the completion callback
 *
 * ctx is unused here (kept for signature symmetry with ss_dma_src_config).
 * Returns 0 on success or a negative error code on failure.
 */
static int ss_dma_dst_config(sunxi_ss_t *sss, void *ctx,
			     ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int ret = 0;
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_dst;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	/* Device-to-memory: drain the SS TX FIFO, 4-byte beats, burst of 1. */
	info->dir = DMA_DEV_TO_MEM;
	dma_conf.direction = info->dir;
	dma_conf.src_addr = sss->base_addr_phy + SS_REG_TXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SS);
	ret = dmaengine_slave_config(info->chan, &dma_conf);
	if (ret != 0) {
		/* Don't map/submit against a misconfigured channel. */
		SS_ERR("dmaengine_slave_config() failed! ret = %d\n", ret);
		return ret;
	}

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n",
		npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}
	info->nents = nents;

	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
			info->dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}