static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL!\n");
		return;
	}

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	else
		dev->ahash_req = ahash_request_cast(async_req);

	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}
void sunxi_ss_work(struct work_struct *work)
{
	int ret = 0;
	unsigned long flags = 0;
	sunxi_ss_t *sss = container_of(work, sunxi_ss_t, work);
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;

	/* Empty the crypto queue and then return. */
	do {
		spin_lock_irqsave(&sss->lock, flags);
		backlog = crypto_get_backlog(&sss->queue);
		async_req = crypto_dequeue_request(&sss->queue);
		spin_unlock_irqrestore(&sss->lock, flags);

		if (!async_req) {
			SS_DBG("async_req is NULL!\n");
			break;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		SS_DBG("async_req->flags = %#x\n", async_req->flags);
		if (async_req->flags & SS_FLAG_AES)
			ret = ss_aes_one_req(sss,
					ablkcipher_request_cast(async_req));
		else if (async_req->flags & SS_FLAG_HASH)
			ret = ss_hash_one_req(sss,
					ahash_request_cast(async_req));
	} while (!ret);
}
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
		MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk &&
		(p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) &&
		(req_ctx->count <= MAX_HW_HASH_SIZE);

	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns 0 on failure, never a negative value */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache, creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
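The five mv_cesa_ahash_* callbacks above (complete, process, step, prepare, cleanup) are the per-request hooks the Marvell CESA core drives for each queued hash request. A minimal sketch of how they would be wired together, assuming an ops table shaped like the driver's mv_cesa_req_ops (field order and signatures inferred from the callbacks above, not copied from the driver):

/*
 * Sketch only: the exact mv_cesa_req_ops layout in the driver may
 * differ; the fields below are assumed from the callback roles above.
 */
struct mv_cesa_req_ops {
	void (*prepare)(struct crypto_async_request *req,
			struct mv_cesa_engine *engine);
	void (*step)(struct crypto_async_request *req);
	int (*process)(struct crypto_async_request *req, u32 status);
	void (*cleanup)(struct crypto_async_request *req);
	void (*complete)(struct crypto_async_request *req);
};

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.prepare = mv_cesa_ahash_prepare,
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};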
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		/* Must start NULL: only set while the engine is idle. */
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    container_of(async_req,
						 struct ablkcipher_request,
						 base);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
void spum_queue_task(unsigned long data)
{
	struct crypto_async_request *async_req = NULL, *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&spum_dev->lock, flags);
	if (test_bit(FLAGS_BUSY, &spum_dev->flags)) {
		spin_unlock_irqrestore(&spum_dev->lock, flags);
		return;
	}
	backlog = crypto_get_backlog(&spum_dev->spum_queue);
	async_req = crypto_dequeue_request(&spum_dev->spum_queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &spum_dev->flags);
	spin_unlock_irqrestore(&spum_dev->lock, flags);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (async_req->tfm->__crt_alg->cra_type == &crypto_ahash_type) {
		spum_dev->hash_dev->req = ahash_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_HASH)
		spum_hash_process_request(spum_dev->hash_dev);
#endif
	} else if (async_req->tfm->__crt_alg->cra_type ==
		   &crypto_ablkcipher_type) {
		spum_dev->aes_dev->req = ablkcipher_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_AES)
		spum_aes_process_request(spum_dev->aes_dev);
#endif
	} else {
		pr_err("%s: Invalid crypto request!\n", __func__);
	}
}
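All of the dispatch loops above (rk_crypto_tasklet_cb, sunxi_ss_work, queue_manag, sahara_queue_manage, spum_queue_task) drain a crypto_queue that the driver's request entry points fill. For context, a minimal sketch of that producer side; the device structure and names here (example_dev, queue_task, example_enqueue) are illustrative, not taken from any of the drivers above:

#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;			/* protects queue */
	struct crypto_queue queue;		/* pending async requests */
	struct tasklet_struct queue_task;	/* drains the queue */
};

/* Queue a request under the lock, then kick the dispatch tasklet. */
static int example_enqueue(struct example_dev *dev,
			   struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	/* Returns -EINPROGRESS, or -EBUSY if the request was backlogged. */
	ret = crypto_enqueue_request(&dev->queue, async_req);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->queue_task);
	return ret;
}

Backlogged requests are the reason every consumer loop calls backlog->complete(backlog, -EINPROGRESS): that callback tells the submitter its previously -EBUSY request has finally entered the hardware queue.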
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
		} else {
			mv_hash_final_fallback(req);
		}
	}
}