static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL !!\n");
		return;
	}
	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	else
		dev->ahash_req = ahash_request_cast(async_req);
	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}
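All of these examples share the same skeleton: take the queue lock, peek at the backlog with crypto_get_backlog(), pop the next request with crypto_dequeue_request(), drop the lock, notify the backlogged submitter with -EINPROGRESS, and hand the dequeued request to the hardware. Below is a minimal sketch of that shared pattern; my_dev, my_dispatch and my_dequeue_task are hypothetical names, and the legacy ablkcipher/crypto_queue API used by the examples on this page is assumed.

struct my_dev {
	spinlock_t lock;		/* protects queue */
	struct crypto_queue queue;	/* pending async requests */
};

/* hypothetical hardware dispatch helper */
static void my_dispatch(struct my_dev *dev, struct ablkcipher_request *req);

static void my_dequeue_task(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;

	/* Backlog peek and dequeue are done together under the queue lock. */
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!async_req)
		return;	/* queue is empty, nothing to dispatch */

	/*
	 * A backlogged request has now moved onto the main queue; tell its
	 * submitter it is in progress so it can stop waiting on -EBUSY.
	 */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* Hand the dequeued request to the driver's hardware path. */
	my_dispatch(dev, ablkcipher_request_cast(async_req));
}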
Example #2
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}
Example #3
void sunxi_ss_work(struct work_struct *work)
{
	int ret = 0;
	unsigned long flags = 0;
	sunxi_ss_t *sss = container_of(work, sunxi_ss_t, work);
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;

	/* empty the crypto queue and then return */
	do {
		spin_lock_irqsave(&sss->lock, flags);
		backlog = crypto_get_backlog(&sss->queue);
		async_req = crypto_dequeue_request(&sss->queue);
		spin_unlock_irqrestore(&sss->lock, flags);

		if (!async_req) {
			SS_DBG("async_req is NULL! \n");
			break;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		SS_DBG("async_req->flags = %#x \n", async_req->flags);
		if (async_req->flags & SS_FLAG_AES)
			ret = ss_aes_one_req(sss, ablkcipher_request_cast(async_req));
		else if (async_req->flags & SS_FLAG_HASH)
			ret = ss_hash_one_req(sss, ahash_request_cast(async_req));
	} while (!ret);
}
Example #4
static void sahara_aes_queue_task(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	int ret;

	spin_lock(&dev->lock);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock(&dev->lock);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->req = req;
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	ctx->dev = dev;
	dev->ctx = ctx;

	ret = sahara_hw_descriptor_create(dev);
	if (ret < 0) {
		spin_lock(&dev->lock);
		clear_bit(FLAGS_BUSY, &dev->flags);
		spin_unlock(&dev->lock);
		dev->req->base.complete(&dev->req->base, ret);
	}
}
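Example #5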
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			/*if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {*/
				struct ablkcipher_request *req =
				    container_of(async_req,
						 struct ablkcipher_request,
						 base);
				cns3xxx_start_new_crypt_req(req);
			/*} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				cns3xxx_start_new_hash_req(req);
			}*/
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());


	return 0;
}
Example #6
static int aes_handle_req(struct aes_hwa_ctx *ctx)
{
	struct tf_crypto_aes_operation_state *state;
	struct crypto_async_request *async_req;
	struct ablkcipher_request *req;
	struct aes_reqctx *rctx;
	unsigned long flags;

	if (ctx->total)
		goto start;

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->backlog = crypto_get_backlog(&ctx->queue);
	async_req = crypto_dequeue_request(&ctx->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &ctx->flags);
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (!async_req)
		return 0;

	req = ablkcipher_request_cast(async_req);

	ctx->req = req;
	ctx->total = req->nbytes;
	ctx->in_offset = 0;
	ctx->in_sg = req->src;
	ctx->out_offset = 0;
	ctx->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode &= FLAGS_MODE_MASK;
	ctx->flags = (ctx->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	/*
	 * Try to get the next pending request so it can be prepared while the
	 * first one is being processed.
	 */
	spin_lock_irqsave(&ctx->lock, flags);

	if (likely(ctx->queue.qlen)) {
		struct list_head *next_async_req;

		next_async_req = ctx->queue.list.next;
		ctx->next_req = list_entry(next_async_req,
			struct crypto_async_request, list);
	} else {
Example #7
File: sahara.c  Project: GongZiYuan/linux
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
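Example #8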
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}
Example #9
File: dcp.c  Project: BozkurTR/kernel
static void dcp_queue_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *) data;
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct dcp_op *ctx;
	struct dcp_dev_req_ctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->queue_lock, flags);

	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	spin_unlock_irqrestore(&dev->queue_lock, flags);

	if (!async_req)
		goto ret_nothing_done;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);
	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	if (!req->src || !req->dst)
		goto ret_nothing_done;

	ctx->flags |= rctx->mode;
	ctx->req = req;

	dcp_crypt(dev, ctx);

	return;

ret_nothing_done:
	clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
Example #10
void spum_queue_task(unsigned long data)
{
	struct crypto_async_request *async_req = NULL, *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&spum_dev->lock, flags);
	if (test_bit(FLAGS_BUSY, &spum_dev->flags)) {
		spin_unlock_irqrestore(&spum_dev->lock, flags);
		return;
	}

	backlog = crypto_get_backlog(&spum_dev->spum_queue);
	async_req = crypto_dequeue_request(&spum_dev->spum_queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &spum_dev->flags);
	spin_unlock_irqrestore(&spum_dev->lock, flags);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (async_req->tfm->__crt_alg->cra_type == &crypto_ahash_type) {
		spum_dev->hash_dev->req = ahash_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_HASH)
		spum_hash_process_request(spum_dev->hash_dev);
#endif
	} else if (async_req->tfm->__crt_alg->cra_type ==
					&crypto_ablkcipher_type) {
		spum_dev->aes_dev->req = ablkcipher_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_AES)
		spum_aes_process_request(spum_dev->aes_dev);
#endif
	} else {
		pr_err("%s: Invalid crypto request!\n", __func__);
		return;
	}

	return;
}
Example #11
static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req, *backlog;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&cesa_dev->lock);
	backlog = crypto_get_backlog(&cesa_dev->queue);
	req = crypto_dequeue_request(&cesa_dev->queue);
	engine->req = req;
	spin_unlock_bh(&cesa_dev->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->prepare(req, engine);
	ctx->ops->step(req);
}
Example #12
File: cryptd.c  Project: 274914765/C
static int cryptd_thread(void *data)
{
    struct cryptd_state *state = data;
    int stop;

    current->flags |= PF_NOFREEZE;

    do {
        struct crypto_async_request *req, *backlog;

        mutex_lock(&state->mutex);
        __set_current_state(TASK_INTERRUPTIBLE);

        spin_lock_bh(&state->lock);
        backlog = crypto_get_backlog(&state->queue);
        req = crypto_dequeue_request(&state->queue);
        spin_unlock_bh(&state->lock);

        stop = kthread_should_stop();

        if (stop || req) {
            __set_current_state(TASK_RUNNING);
            if (req) {
                if (backlog)
                    backlog->complete(backlog,
                              -EINPROGRESS);
                req->complete(req, 0);
            }
        }

        mutex_unlock(&state->mutex);

        schedule();
    } while (!stop);

    return 0;
}
Example #13
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}