Example No. 1 (score: 0)
/*
 * Queue a SHA request on the driver's global device queue and wake the
 * worker kthread that will process it.
 *
 * @req:  the hash request to enqueue
 * @last: non-zero when this is the final chunk for the transform
 *
 * Returns 0 for an empty, non-final request (nothing to do), otherwise
 * the status from crypto_enqueue_request() (typically -EINPROGRESS, or
 * -EBUSY when the backlog is used).
 */
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;	/* single global device instance */
	int ret;

	/* No data and not the final chunk: nothing to submit. */
	if (!req->nbytes && !last)
		return 0;

	/* Serialize per-request state against the worker thread. */
	mutex_lock(&rctx->mutex);
	rctx->last = last;

	/* First submission for this transform: mark it active/first. */
	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	/*
	 * Lock order matters: the device queue mutex nests inside the
	 * per-request mutex.
	 */
	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	/* Kick the worker so it drains the queue. */
	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}
/*
 * Push an asynchronous crypto request onto the global cns3xxx queue,
 * then wake the dedicated queue thread so the request gets processed.
 *
 * Returns the status from crypto_enqueue_request() (typically
 * -EINPROGRESS, or -EBUSY when the backlog is used).
 */
static int cns3xxx_handle_req(struct crypto_async_request *req)
{
	unsigned long irq_state;
	int status;

	spin_lock_irqsave(&cpg->lock, irq_state);
	status = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, irq_state);

	/* Poke the worker thread to drain the newly queued work. */
	wake_up_process(cpg->queue_th);

	return status;
}
Example No. 3 (score: 0)
/*
 * Enqueue an asynchronous crypto request on the SPU-M device queue,
 * holding the device spinlock for the duration of the insertion.
 *
 * Returns the status from crypto_enqueue_request().
 */
int spum_enqueue_request(struct crypto_async_request *req)
{
	unsigned long irq_state;
	int status;

	spin_lock_irqsave(&spum_dev->lock, irq_state);
	status = crypto_enqueue_request(&spum_dev->spum_queue, req);
	spin_unlock_irqrestore(&spum_dev->lock, irq_state);

	return status;
}
Example No. 4 (score: 0)
/*
 * Add @async_req to the device queue.  If the engine is idle, mark it
 * busy and schedule the queue tasklet to start processing; if it is
 * already busy, the running tasklet will pick the request up later.
 *
 * Returns the status from crypto_enqueue_request().
 */
static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			      struct crypto_async_request *async_req)
{
	unsigned long irq_state;
	bool kick_tasklet;
	int status;

	spin_lock_irqsave(&dev->lock, irq_state);
	status = crypto_enqueue_request(&dev->queue, async_req);
	/* Only the thread that flips busy 0->1 schedules the tasklet. */
	kick_tasklet = !dev->busy;
	if (kick_tasklet)
		dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, irq_state);

	if (kick_tasklet)
		tasklet_schedule(&dev->queue_task);

	return status;
}
Example No. 5 (score: 0)
/*
 * Queue @req on the global CESA queue and, when the enqueue succeeded,
 * give every idle engine a chance to start pulling work.
 *
 * Returns -EINPROGRESS when the request was accepted; any other status
 * from crypto_enqueue_request() is propagated unchanged.
 */
int mv_cesa_queue_req(struct crypto_async_request *req)
{
	int status;
	int eng;

	spin_lock_bh(&cesa_dev->lock);
	status = crypto_enqueue_request(&cesa_dev->queue, req);
	spin_unlock_bh(&cesa_dev->lock);

	/* Propagate anything other than the normal in-progress status. */
	if (status != -EINPROGRESS)
		return status;

	/* Kick each engine that is not currently running a request. */
	for (eng = 0; eng < cesa_dev->caps->nengines; eng++) {
		spin_lock_bh(&cesa_dev->engines[eng].lock);
		if (!cesa_dev->engines[eng].req)
			mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[eng]);
		spin_unlock_bh(&cesa_dev->engines[eng].lock);
	}

	return -EINPROGRESS;
}
/*
 * Synchronous one-shot digest on the Rockchip hash engine: describe the
 * whole request to the device, enqueue it, schedule the tasklet, then
 * sleep-poll the hardware status register and copy the digest straight
 * out of the MMIO result registers.
 *
 * Returns 0 on success, or -EINVAL for an unsupported digest size.
 *
 * NOTE(review): the status from crypto_enqueue_request() is stored in
 * @ret but never checked or returned — a failed or backlogged enqueue
 * is silently ignored and the function polls anyway.  Confirm this is
 * intentional.
 */
static int rk_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = NULL;
	unsigned long flags;
	int ret;

	/* Zero-length input: produce the precomputed empty-message hash. */
	if (!req->nbytes)
		return zero_message_process(req);

	/* Program the request description into the device state. */
	dev = tctx->dev;
	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->mode = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->nents = sg_nents(req->src);

	/* Select the hardware mode from the requested digest size. */
	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->crypto_tasklet);

	/*
	 * The hardware needs some time after the last DMA transfer, and
	 * that time depends on the length of the final chunk, so no
	 * fixed delay fits.  Sleeping 10-50 us per poll avoids calling
	 * here so frequently that it wastes cycles, while still
	 * reacting quickly once the DMA completes.
	 */
	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
		usleep_range(10, 50);

	/* Digest is ready in the DOUT MMIO registers; copy it out. */
	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
		      crypto_ahash_digestsize(tfm));

	return 0;
}