/*
 * Set up scatterlist iterators for a new cipher request and kick off
 * processing of the first chunk on the engine.
 *
 * NOTE(review): unlike mv_start_new_crypt_req() below, this variant zeroes
 * cpg->p but never sets p->hw_nbytes, p->complete, p->process or
 * p->copy_back, and it stores the raw request pointer rather than
 * &req->base.  It looks like an older/duplicate version of the same
 * routine -- confirm whether this is still a live code path.
 */
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	/* Reset all per-request progress state. */
	memset(&cpg->p, 0, sizeof(struct req_progress));

	/* Iterate the source scatterlist to read input data from. */
	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	/* Iterate the destination scatterlist to write results to. */
	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	/* Feed the first chunk to the engine immediately. */
	mv_process_current_q(1);
}
/*
 * Begin processing a new hash request.
 *
 * Combines any bytes left over from the previous update (ctx->extra_bytes)
 * with the new data, hands whole SHA1-sized blocks to the hardware, and
 * buffers any trailing partial block for the next call.  If no hardware
 * work remains, the request is completed inline (falling back to the
 * software implementation for the final step when needed).
 */
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	int num_sgs, hw_bytes, old_extra_bytes, rc;
	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	/* Total data available = new bytes plus the buffered remainder. */
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	/* Stage the previously buffered partial block into engine SRAM first,
	 * so it precedes the new data in the input stream. */
	if (unlikely(ctx->extra_bytes)) {
		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
		       ctx->extra_bytes);
		p->crypt_len = ctx->extra_bytes;
	}

	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

	/* Not the first chunk: reload the intermediate digest state (five
	 * 32-bit words) into the engine's initial-value registers. */
	if (unlikely(!ctx->first_hash)) {
		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	/* Hold back the trailing partial block unless this is the last chunk
	 * small enough for the hardware to finalize (count <= MAX_HW_HASH_SIZE
	 * -- presumably the engine's finalization limit; larger requests go
	 * through the software fallback). */
	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		/* Hardware path: describe the work and process chunk one. */
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		mv_process_hash_current(1);
	} else {
		/* Nothing for the hardware: stash the new bytes after the
		 * old remainder in ctx->buffer for a later call. */
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		/* Complete with BHs disabled -- NOTE(review): presumably so
		 * the callback runs in the same context it would from the
		 * interrupt/tasklet path; confirm against the driver's
		 * completion conventions. */
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}
/*
 * Prepare the shared progress state for a fresh cipher request and start
 * feeding the engine with the first chunk.
 */
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *progress = &cpg->p;
	int sg_count;

	cpg->cur_req = &req->base;
	memset(progress, 0, sizeof(*progress));

	/* Describe the work to the chunk processor. */
	progress->hw_nbytes = req->nbytes;
	progress->complete = mv_crypto_algo_completion;
	progress->process = mv_process_current_q;
	progress->copy_back = 1;

	/* Input side: walk the source scatterlist. */
	sg_count = count_sgs(req->src, req->nbytes);
	sg_miter_start(&progress->src_sg_it, req->src, sg_count,
		       SG_MITER_FROM_SG);

	/* Output side: walk the destination scatterlist. */
	sg_count = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&progress->dst_sg_it, req->dst, sg_count,
		       SG_MITER_TO_SG);

	/* Kick off processing of the first chunk. */
	mv_process_current_q(1);
}