static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
static int crc32c_final(struct ahash_request *req)
{
	u32 *crcp = ahash_request_ctx(req);

	*(__le32 *)req->result = ~cpu_to_le32p(crcp);
	return 0;
}
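/*
 * Minimal sketch (not part of any driver above) of how a kernel caller
 * would exercise ->init()/->update()/->final() hooks such as
 * crc32c_init()/crc32c_final() through the generic ahash API.  The
 * function name is hypothetical, the "crc32c" algorithm name and the
 * availability of the crypto_wait_req()/DECLARE_CRYPTO_WAIT helpers are
 * assumptions for illustration only.
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int crc32c_ahash_example(const void *data, unsigned int len, u32 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *ahreq;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ahreq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!ahreq) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(ahreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(ahreq, &sg, (u8 *)out, len);

	/* digest() dispatches to the driver's init/update/final path */
	ret = crypto_wait_req(crypto_ahash_digest(ahreq), &wait);

	ahash_request_free(ahreq);
	crypto_free_ahash(tfm);
	return ret;
}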
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, the hardware applies padding when the first
		 * block is set, therefore the byte_count must be
		 * incremented by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
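/*
 * Worked example for qce_import_common() above (illustration only,
 * assuming SHA_PADDING == 64, SHA_PADDING_MASK == 63 and a 64-byte block
 * size): importing an HMAC state with in_count == 100 gives
 * count = 100 + 64 = 164, so byte_count[0] = 164 & ~63 = 128, and
 * buflen = 100 & 63 = 36 bytes are kept in rctx->buf for the next update.
 */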
static int n2_sha1_async_digest(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *s = &rctx->u.sha1;

	if (unlikely(req->nbytes == 0)) {
		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
			0x07, 0x09
		};

		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA1_H0;
	s->state[1] = SHA1_H1;
	s->state[2] = SHA1_H2;
	s->state[3] = SHA1_H3;
	s->state[4] = SHA1_H4;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
				    s->state);
}
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
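/*
 * Hedged sketch (not part of the qce driver) of how a caller pairs the
 * ->export()/->import() hooks above to checkpoint and resume a partial
 * hash.  The function name is hypothetical; it assumes both requests
 * were allocated for the same tfm, and sizes the state buffer from
 * crypto_ahash_statesize().
 */
#include <crypto/hash.h>
#include <linux/slab.h>

static int ahash_checkpoint_example(struct ahash_request *req,
				    struct ahash_request *resumed)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* e.g. dispatches to qce_ahash_export() for a qce tfm */
	ret = crypto_ahash_export(req, state);
	if (!ret)
		ret = crypto_ahash_import(resumed, state);

	kfree(state);
	return ret;
}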
static int n2_sha256_async_digest(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *s = &rctx->u.sha256;

	if (req->nbytes == 0) {
		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
			0x1b, 0x78, 0x52, 0xb8, 0x55
		};

		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA256_H0;
	s->state[1] = SHA256_H1;
	s->state[2] = SHA256_H2;
	s->state[3] = SHA256_H3;
	s->state[4] = SHA256_H4;
	s->state[5] = SHA256_H5;
	s->state[6] = SHA256_H6;
	s->state[7] = SHA256_H7;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
				    s->state);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}
static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}
static int n2_sha224_async_digest(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *s = &rctx->u.sha256;

	if (req->nbytes == 0) {
		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
			0x2f
		};

		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA224_H0;
	s->state[1] = SHA224_H1;
	s->state[2] = SHA224_H2;
	s->state[3] = SHA224_H3;
	s->state[4] = SHA224_H4;
	s->state[5] = SHA224_H5;
	s->state[6] = SHA224_H6;
	s->state[7] = SHA224_H7;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
				    s->state);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
		MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk &&
		(p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) &&
		(req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}
static int crc32c_init(struct ahash_request *req)
{
	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	u32 *crcp = ahash_request_ctx(req);

	*crcp = *mctx;
	return 0;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}
/*
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	if (unlikely(ctx->extra_bytes)) {
		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
		       ctx->extra_bytes);
		p->crypt_len = ctx->extra_bytes;
	}

	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

	if (unlikely(!ctx->first_hash)) {
		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}

	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (ret < 0)
		goto error_unmap_src;

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}
static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}