Example #1
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
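A loop like this is only half of the picture: a producer path has to enqueue requests and wake the kthread that sleeps in schedule(). The sketch below shows that pairing, assuming the driver keeps the task in dev->kthread and starts it at probe time with kthread_run(); the helper name and the kthread field are illustrative, not taken from the driver.

static int sahara_queue_request(struct sahara_dev *dev,
				struct crypto_async_request *req)
{
	int err;

	/* Serialize against the dequeue in sahara_queue_manage() */
	mutex_lock(&dev->queue_mutex);
	err = crypto_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	/* Wake the worker; it sleeps in schedule() whenever the queue is empty */
	wake_up_process(dev->kthread);

	return err;
}

At probe time the thread would be created once, e.g. dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto"), and torn down with kthread_stop(dev->kthread), which is what makes the kthread_should_stop() check in the loop terminate it.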
Example #2
static void dcp_queue_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *) data;
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct dcp_op *ctx;
	struct dcp_dev_req_ctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->queue_lock, flags);

	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	spin_unlock_irqrestore(&dev->queue_lock, flags);

	if (!async_req)
		goto ret_nothing_done;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);
	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	if (!req->src || !req->dst)
		goto ret_nothing_done;

	ctx->flags |= rctx->mode;
	ctx->req = req;

	dcp_crypt(dev, ctx);

	return;

ret_nothing_done:
	clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
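The enqueue side that pairs with a tasklet like this usually takes the same queue lock, adds the request, and only schedules the tasklet when the engine is idle. A minimal sketch under those assumptions; the helper name and the dev->queue_task tasklet field are illustrative.

static int dcp_handle_req(struct dcp_dev *dev,
			  struct crypto_async_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->queue_lock, flags);
	err = crypto_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->queue_lock, flags);

	/* Only kick the tasklet if the engine is not already busy */
	if (!test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
		tasklet_schedule(&dev->queue_task);

	return err;
}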
Example #3
void spum_queue_task(unsigned long data)
{
	struct crypto_async_request *async_req = NULL, *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&spum_dev->lock, flags);
	if (test_bit(FLAGS_BUSY, &spum_dev->flags)) {
		spin_unlock_irqrestore(&spum_dev->lock, flags);
		return;
	}

	backlog = crypto_get_backlog(&spum_dev->spum_queue);
	async_req = crypto_dequeue_request(&spum_dev->spum_queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &spum_dev->flags);
	spin_unlock_irqrestore(&spum_dev->lock, flags);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (async_req->tfm->__crt_alg->cra_type == &crypto_ahash_type) {
		spum_dev->hash_dev->req = ahash_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_HASH)
		spum_hash_process_request(spum_dev->hash_dev);
#endif
	} else if (async_req->tfm->__crt_alg->cra_type ==
					&crypto_ablkcipher_type) {
		spum_dev->aes_dev->req = ablkcipher_request_cast(async_req);
#if defined(CONFIG_CRYPTO_DEV_BRCM_SPUM_AES)
		spum_aes_process_request(spum_dev->aes_dev);
#endif
	} else {
		pr_err("%s: Invalid crypto request!\n", __func__);
		/* Drop the busy flag so queued requests are not stranded */
		clear_bit(FLAGS_BUSY, &spum_dev->flags);
	}
}
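Note that FLAGS_BUSY is set as soon as a request is dequeued here, so the completion path must clear it and re-run the tasklet, otherwise later queued requests stay stuck in the queue. A hedged sketch of that hand-off; the helper name and the spum_dev->queue_task field are illustrative.

static void spum_finish_req(struct crypto_async_request *async_req, int err)
{
	/* Report the result back through the crypto API */
	async_req->complete(async_req, err);

	/* Allow the next request to be dispatched and pick it up */
	clear_bit(FLAGS_BUSY, &spum_dev->flags);
	tasklet_schedule(&spum_dev->queue_task);
}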
Example #4
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx   = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}
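A callback like this is wired up once at probe time together with the request queue it drains. A minimal sketch of that setup, assuming the dev->tasklet field name; the queue depth is an arbitrary illustrative value.

	/* In the probe routine, before the algorithms are registered */
	spin_lock_init(&dev->lock);
	crypto_init_queue(&dev->queue, 50);	/* illustrative depth */
	tasklet_init(&dev->tasklet, s5p_tasklet_cb, (unsigned long)dev);

The interrupt handler (or a dedicated enqueue helper) then calls tasklet_schedule(&dev->tasklet) whenever new work may be available.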
Example #5
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		/* dma_map_sg() returns 0 on failure, not a negative errno */
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
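The DMA completion callback passed to qce_dma_prep_sgs() has to undo the same mappings that the error labels above undo. The following is only an outline of that cleanup under the assumptions visible in this function, not the driver's actual qce_ablkcipher_done():

static void qce_ablkcipher_done_sketch(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	bool diff_dst = (req->src != req->dst);

	/* Mirror the mappings set up in the request handler above */
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents,
			     DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents,
		     diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);

	sg_free_table(&rctx->dst_tbl);

	/* ... read back status and the updated IV from the hardware here ... */

	req->base.complete(&req->base, 0);
}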
Example #6
static int aes_dma_start(struct aes_hwa_ctx *ctx)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct omap_dma_channel_params dma_params;
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	static size_t last_count;
	unsigned long flags;

	in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
	out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));

	fast = in && out;

	if (fast) {
		count = min(ctx->total, sg_dma_len(ctx->in_sg));
		count = min(count, sg_dma_len(ctx->out_sg));

		if (count != ctx->total)
			return -EINVAL;

		/* Only call dma_map_sg if it has not yet been done */
		if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
			if (!err)
				return -EINVAL;

			err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(
					NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
		ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;

		addr_in = sg_dma_address(ctx->in_sg);
		addr_out = sg_dma_address(ctx->out_sg);

		ctx->flags |= FLAGS_FAST;
	} else {
		count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
			ctx->buflen, ctx->total, 0);
		addr_in = ctx->dma_addr_in;
		addr_out = ctx->dma_addr_out;

		ctx->flags &= ~FLAGS_FAST;
	}

	ctx->total -= count;

	/* Configure HWA */
	tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);

	OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
		| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
		| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

	ctx->dma_size = count;
	if (!fast)
		dma_sync_single_for_device(NULL, addr_in, count,
			DMA_TO_DEVICE);

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.frame_count = count / AES_BLOCK_SIZE;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_start = addr_in;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_in, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
	} else {
		if (last_count != count)
			omap_set_dma_transfer_params(ctx->dma_lch_in,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);

		/* Configure input start address */
		__raw_writel(dma_params.src_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
	}

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_start = addr_out;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_out, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
		reconfigure_dma = false;
	} else {
		if (last_count != count) {
			omap_set_dma_transfer_params(ctx->dma_lch_out,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);
			last_count = count;
		}
		/* Configure output start address */
		__raw_writel(dma_params.dst_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
	}

	/* Is this really needed? */
	omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
	omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);

	wmb();

	omap_start_dma(ctx->dma_lch_in);
	omap_start_dma(ctx->dma_lch_out);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->next_req) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(ctx->next_req);

		if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
			if (!err) {
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
			ctx->next_req = NULL;
		}
	}

	if (ctx->backlog) {
		ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
		ctx->backlog = NULL;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}
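One detail worth noting: in the non-fast path the input was staged into the bounce buffer ctx->buf_in and synced with dma_sync_single_for_device(), so the DMA-done path has to do the reverse for the output before completing the request. A rough sketch under the assumption that a ctx->buf_out/ctx->out_offset pair exists for the output side and that the last argument of sg_copy() selects the copy direction (both are assumptions, not taken from this snippet):

	/* In the DMA-done handler, slow (bounce-buffer) case only */
	if (!(ctx->flags & FLAGS_FAST)) {
		dma_sync_single_for_cpu(NULL, ctx->dma_addr_out,
					ctx->dma_size, DMA_FROM_DEVICE);
		/* Copy the result back into the caller's scatterlist
		 * (direction flag semantics assumed) */
		sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
			ctx->buflen, ctx->dma_size, 1);
	}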
Example #7
static void cns3xxx_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct cns3xxx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct cns3xxx_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;
	struct accel_config in_op;
	int i;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		/* Load the IV for CBC mode (originally gated on first_block) */
		memcpy(cpg->in_buf + IN_DATA_IV_P, req->info, 16);
		break;
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		in_op.aes_type = AES_128;
		break;
	case AES_KEYSIZE_192:
		in_op.aes_type = AES_192;
		break;
	case AES_KEYSIZE_256:
		in_op.aes_type = AES_256;
		break;
	}

	/* Both directions load the same key material; only the enc flag differs */
	in_op.enc = req_ctx->decrypt ? 0 : 1;
	memcpy(cpg->in_buf + IN_DATA_KEY_P, ctx->aes_enc_key, ctx->key_len);

	setup_data_in(ctx->key_len);
	in_op.msg_len = cpg->p.crypt_len;

	in_op.inv.bufcnt = cpg->src_num_sgs;
	for (i = 0; i < cpg->src_num_sgs; i++) {
		in_op.inv.bufptr[i] = (u32 *)cpg->in_buf;
		in_op.inv.bufsize[i] = in_op.msg_len;
	}

	in_op.outv.bufcnt = cpg->dst_num_sgs;
	for (i = 0; i < cpg->dst_num_sgs; i++) {
		in_op.outv.bufptr[i] = (u32 *)cpg->out_buf;
		in_op.outv.bufsize[i] = in_op.msg_len;
	}

	/* GO */

#if 0	/* dump input data */
	if (req_ctx->decrypt) {
		printk("Key Pattern =");
		for (i = 0; i < 48; i++) {
			printk("%2x", cpg->in_buf[i]);
			if (((i + 1) % 16) == 0)
				printk("\n");
		}
	}
#endif

	if (p_KernAes != NULL)
		(*p_KernAes)(n1_list->data, in_op.enc, in_op.aes_type, 0,
			     in_op.msg_len, &in_op.inv, &in_op.outv,
			     crypto_int, NULL);
	else
		printk(KERN_CRIT "kernel_shim: Csp1EncryptAes: symbol_get(n1_EncryptAes) failed\n");

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
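The closing XXX comment asks for a timeout in case the completion interrupt never fires. A minimal sketch of such a watchdog using the classic timer API; the handler name and the cpg->timeout field are illustrative and not part of this driver.

static void cns3xxx_crypt_timeout(unsigned long data)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);

	/* The interrupt never arrived; fail the request so the queue
	 * does not stall forever. */
	req->base.complete(&req->base, -ETIMEDOUT);
}

/* At init: setup_timer(&cpg->timeout, cns3xxx_crypt_timeout, 0);
 * arm it right after handing the job to the accelerator with
 * mod_timer(&cpg->timeout, jiffies + msecs_to_jiffies(1000));
 * and cancel it in the completion interrupt with del_timer(&cpg->timeout). */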