Example No. 1
int ss_hash_update(struct ahash_request *req)
{
	int err = 0;
	unsigned long flags = 0;

	if (!req->nbytes) {
		SS_ERR("Invalid length: %d. \n", req->nbytes);
		return 0;
	}

	SS_DBG("Flags: %#x, len = %d \n", req->base.flags, req->nbytes);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	req->base.flags |= SS_FLAG_HASH;

	spin_lock_irqsave(&ss_dev->lock, flags);
	err = ahash_enqueue_request(&ss_dev->queue, req);
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	queue_work(ss_dev->workqueue, &ss_dev->work);
	return err;
}
Example No. 2
static int __devinit sunxi_ss_probe(struct platform_device *pdev)
{
	int ret = 0;
	sunxi_ss_t *sss = NULL;

	sss = devm_kzalloc(&pdev->dev, sizeof(sunxi_ss_t), GFP_KERNEL);
	if (sss == NULL) {
		SS_ERR("Unable to allocate sunxi_ss_t\n");
		return -ENOMEM;
	}

	snprintf(sss->dev_name, sizeof(sss->dev_name), SUNXI_SS_DEV_NAME);
	platform_set_drvdata(pdev, sss);

	ret = sunxi_ss_res_request(pdev);
	if (ret != 0) {
		goto err0;
	}

	sss->pdev = pdev;

	ret = sunxi_ss_hw_init(sss);
	if (ret != 0) {
		SS_ERR("SS hw init failed!\n");
		goto err1;
	}

	spin_lock_init(&sss->lock);
	INIT_WORK(&sss->work, sunxi_ss_work);
	crypto_init_queue(&sss->queue, 16);

	sss->workqueue = create_singlethread_workqueue(sss->dev_name);
	if (sss->workqueue == NULL) {
		SS_ERR("Unable to create workqueue\n");
		ret = -EPERM;
		goto err2;
	}

	ret = sunxi_ss_alg_register();
	if (ret != 0) {
		SS_ERR("sunxi_ss_alg_register() failed! return %d \n", ret);
		goto err3;
	}

	sunxi_ss_sysfs_create(pdev);

	ss_dev = sss;
	SS_DBG("SS driver probe succeed, base 0x%p, irq %d!\n", sss->base_addr, sss->irq);
	return 0;

err3:
	destroy_workqueue(sss->workqueue);
err2:
	sunxi_ss_hw_exit(sss);
err1:
	sunxi_ss_res_release(sss);
err0:
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example No. 3
static void
kclient_report(void)
{
	SS_ERR("Initiated %d connects\n",
		KCLIENT_NTHREADS * KCLIENT_NCONNECTS);
	SS_ERR("Of those %d connects initiated successfully\n",
		atomic_read(&kclient_connect_nattempt));
	SS_ERR("Of those %d connections were established successfully\n",
		atomic_read(&kclient_connect_ncomplete));
	SS_ERR("and %d connections completed with error\n",
		atomic_read(&kclient_connect_nerror));
}
Example No. 4
/* ctx - only used for HASH. */
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_DEV;
	dma_conf.direction = info->dir;
	dma_conf.dst_addr = sss->base_addr_phy + SS_REG_RXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SS, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n", npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}

	info->nents = nents;

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		ss_hash_padding_sg_prepare(&info->sg[nents-1], len);

		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;
	}

	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
				info->dir,	DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example No. 5
static int __init
kclient_init(void)
{
	int i, ret = 0;
	struct task_struct *task;

	if (tfw_addr_pton(server, &kclient_server_address)) {
		SS_ERR("Unable to parse server's address: %s", server);
		return -EINVAL;
	}
	SS_ERR("Started kclient module, server's address is %s\n", server);

	task = kthread_create(kclient_thread_finish, 0,
			      "kclient_thread_finish");
	if (IS_ERR_OR_NULL(task)) {
		ret = PTR_ERR(task);
		SS_ERR("Unable to create thread: %s (%d)\n",
		       "kclient_finish_task", ret);
		return ret;
	}
	kclient_finish_task = task;

	for (i = 0; i < KCLIENT_NTHREADS; i++) {
		task = kthread_create(kclient_thread_connect, (void *)(long)i,
				      "kclient_thread_connect_%02d", i);
		if (IS_ERR_OR_NULL(task)) {
			ret = PTR_ERR(task);
			SS_ERR("Unable to create a thread: %s%02d (%d)\n",
				"kclient_thread_connect", i, ret);
			break;
		}
		kclient_connect_task[i] = task;
	}
	if (ret) {
		kclient_stop_threads();
	} else {
		atomic_set(&kclient_nthreads, KCLIENT_NTHREADS);
		for (i = 0; i < KCLIENT_NTHREADS; i++) {
			wake_up_process(kclient_connect_task[i]);
		}
		SS_ERR("Started %d threads to initiate %d connects each\n",
			KCLIENT_NTHREADS, KCLIENT_NCONNECTS);
		wait_event_interruptible(kclient_connect_wq,
					 atomic_read(&kclient_nthreads) == 0);
		wake_up_process(kclient_finish_task);
	}
	return ret;
}
Example No. 6
static int
kclient_thread_finish(void *data)
{
	int nattempt = atomic_read(&kclient_connect_nattempt);
	uint64_t time_max = (uint64_t)get_seconds() + KCLIENT_WAIT_MAX;

	set_freezable();
	do {
		long timeout = KCLIENT_WAIT_INTVL;
		int nerror = atomic_read(&kclient_connect_nerror);
		int ncomplete = atomic_read(&kclient_connect_ncomplete);

		if (ncomplete + nerror == nattempt) {
			break;
		}
		wait_event_freezable_timeout(kclient_finish_wq,
					     kthread_should_stop(),
					     timeout);
		if ((uint64_t)get_seconds() > time_max) {
			SS_ERR("%s exceeded maximum wait time of %d seconds\n",
				"kclient_thread_finish", KCLIENT_WAIT_MAX);
			break;
		}
	} while (!kthread_should_stop());

	kclient_release_sockets();
	kclient_finish_task = NULL;
	return 0;
}
Example No. 7
int ss_rng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	ss_aes_ctx_t *ctx = crypto_rng_ctx(tfm);

	SS_DBG("flow = %d, rdata = %p, len = %d \n", ctx->comm.flow, rdata, dlen);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	ss_dev_lock();

	/* Must set the seed addr in PRNG/TRNG. */
	ss_key_set(ctx->key, ctx->key_size);
	dma_map_single(&ss_dev->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV);

	ret = ss_rng_start(ctx, rdata, dlen);
	ss_dev_unlock();

	SS_DBG("Get %d byte random. \n", ret);

	dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key), ctx->key_size, DMA_MEM_TO_DEV);

	return ret;
}
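
For context, here is a minimal consumer-side sketch (not part of the driver) of how a PRNG registered by this driver could be reached through the generic kernel crypto RNG API. The algorithm name "prng" is an assumption inferred from the strncmp() check in sunxi_ss_alg_register(); the seed buffer and lengths are illustrative only.

#include <crypto/rng.h>
#include <linux/err.h>

/*
 * Hedged usage sketch: obtain random bytes from a registered RNG through the
 * crypto API. "prng" is an assumed algorithm name; crypto_rng_reset() supplies
 * the seed that ss_rng_get_random() later programs via ss_key_set().
 */
static int example_get_random(u8 *buf, unsigned int len,
			      const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("prng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_reset(rng, (u8 *)seed, slen);
	if (ret == 0)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}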
Example No. 8
/* Make a sg_table based on sg[] of crypto request. */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
							int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
	}

	dst_sg = sgt->sgl;
	for (i=0; i<npages; i++) {
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
	return 0;
}
Example No. 9
int ss_hash_final(struct ahash_request *req)
{
	int pad_len = 0;
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct scatterlist last = {0}; /* make a sg struct for padding data. */

	if (req->result == NULL) {
		SS_ERR("Invalid result porinter. \n");
		return -EINVAL;
	}
	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	/* Process the padding data. */
	pad_len = ss_hash_padding(ctx, req_ctx->type == SS_METHOD_MD5 ? 0 : 1);
	SS_DBG("Pad len: %d \n", pad_len);
	req_ctx->dma_src.sg = &last;
	sg_init_table(&last, 1);
	sg_set_buf(&last, ctx->pad, pad_len);
	SS_DBG("Padding data: \n");
	print_hex(ctx->pad, 128, (int)ctx->pad);

	ss_dev_lock();
	ss_hash_start(ctx, req_ctx, pad_len);

	ss_sha_final();

	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);

	ss_check_sha_end();
	memcpy(req->result, ctx->md, ctx->md_size);
	ss_ctrl_stop();
	ss_dev_unlock();

#ifdef SS_SHA_SWAP_FINAL_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(req->result, ctx->md_size);
#endif

	return 0;
}
Example No. 10
static int sunxi_ss_hw_init(sunxi_ss_t *sss)
{
#ifdef CONFIG_EVB_PLATFORM
	int ret = 0;
#endif
	struct clk *pclk = NULL;

	pclk = clk_get(&sss->pdev->dev, SS_PLL_CLK);
	if (IS_ERR_OR_NULL(pclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				SS_PLL_CLK, PTR_RET(pclk));
		return PTR_RET(pclk);
	}

	sss->mclk = clk_get(&sss->pdev->dev, sss->dev_name);
	if (IS_ERR_OR_NULL(sss->mclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				sss->dev_name, PTR_RET(sss->mclk));
		return PTR_RET(sss->mclk);
	}

#ifdef CONFIG_EVB_PLATFORM
	ret = clk_set_parent(sss->mclk, pclk);
	if (ret != 0) {
		SS_ERR("clk_set_parent() failed! return %d\n", ret);
		return ret;
	}

	ret = clk_set_rate(sss->mclk, SS_CLK_RATE);
	if (ret != 0) {
		SS_ERR("clk_set_rate(%d) failed! return %d\n", SS_CLK_RATE, ret);
		return ret;
	}
#endif
	SS_DBG("SS mclk %luMHz, pclk %luMHz\n", clk_get_rate(sss->mclk)/1000000,
			clk_get_rate(pclk)/1000000);

	if (clk_prepare_enable(sss->mclk)) {
		SS_ERR("Couldn't enable module clock\n");
		return -EBUSY;
	}

	clk_put(pclk);
	return 0;
}
Example No. 11
int ss_aes_key_valid(struct crypto_ablkcipher *tfm, int len)
{
	if (unlikely(len > AES_MAX_KEY_SIZE)) {
		SS_ERR("Unsupported key size: %d \n", len);
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	return 0;
}
Example No. 12
static int ss_dma_dst_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_dst;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_DEV_TO_MEM;
	dma_conf.direction = info->dir;
	dma_conf.src_addr = sss->base_addr_phy + SS_REG_TXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SS);
	dmaengine_slave_config(info->chan, &dma_conf);

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n", npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}

	info->nents = nents;
	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
				info->dir,	DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example No. 13
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int flow = ((ss_comm_ctx_t *)ctx)->flow;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_MEM;
	dma_conf.direction = info->dir;
#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR)
		dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else
#endif
	dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	ss_sg_table_init(&info->sgt_for_cp, info->sg, len, sss->flows[flow].buf_src, sss->flows[flow].buf_src_dma);
	SS_DBG("chan: 0x%p, info->sgt_for_cp.sgl: 0x%p \n", info->chan, info->sgt_for_cp.sgl);

	info->nents = info->sgt_for_cp.nents;
	SS_DBG("flow: %d, sg num: %d, total len: %d \n", flow, info->nents, len);

	dma_map_sg(&sss->pdev->dev, info->sg, info->nents, info->dir);
	dma_map_sg(&sss->pdev->dev, info->sgt_for_cp.sgl, info->nents, info->dir);

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;

		ss_hash_padding_sg_prepare(&info->sg[info->nents-1], len);
		ss_hash_padding_sg_prepare(&info->sgt_for_cp.sgl[info->nents-1], len);
	}

	dma_desc = info->chan->device->device_prep_dma_sg(info->chan,
			info->sgt_for_cp.sgl, info->nents,
			info->sg, info->nents, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example No. 14
static int __init sunxi_ss_init(void)
{
	int ret = 0;

	SS_DBG("[%s %s]Sunxi SS init ... \n", __DATE__, __TIME__);

	ret = platform_driver_register(&sunxi_ss_driver);
	if (ret < 0) {
		SS_ERR("platform_driver_register() failed, return %d\n", ret);
		return ret;
	}

	ret = platform_device_register(&sunxi_ss_device);
	if (ret < 0) {
		SS_ERR("platform_device_register() failed, return %d\n", ret);
		return ret;
	}

	return ret;
}
Example No. 15
static int ss_rng_start(ss_aes_ctx_t *ctx, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG) {
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_TRNG);
		ss_trng_osc_enable();
	}
	else
#endif
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_PRNG);

	ss_rng_mode_set(SS_RNG_MODE_CONTINUE);

	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
#ifdef SS_TRNG_ENABLE
	ss_data_len_set(DIV_ROUND_UP(dlen, 32)*(32>>2)); /* align with 32 Bytes */
#else
	ss_data_len_set(DIV_ROUND_UP(dlen, 20)*(20>>2)); /* align with 20 Bytes */
#endif
	SS_DBG("Flow: %d, Request: %d, Aligned: %d \n", flow, dlen, DIV_ROUND_UP(dlen, 20)*5);
	dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst, SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);

	ss_ctrl_start();
	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		return -ETIMEDOUT;
	}

	memcpy(rdata, ss_dev->flows[flow].buf_dst, dlen);
	dma_unmap_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst_dma, SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);
	ss_irq_disable(flow);
	ret = dlen;

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG)
		ss_trng_osc_disable();
#endif

	ss_ctrl_stop();
	return ret;
}
Example No. 16
static int sunxi_ss_alg_register(void)
{
	int i;
	int ret = 0;

	for (i=0; i<ARRAY_SIZE(sunxi_ss_algs); i++) {
		INIT_LIST_HEAD(&sunxi_ss_algs[i].cra_list);

		sunxi_ss_algs[i].cra_priority = 300;
		sunxi_ss_algs[i].cra_ctxsize = sizeof(ss_aes_ctx_t);
		sunxi_ss_algs[i].cra_module = THIS_MODULE;
		sunxi_ss_algs[i].cra_exit = sunxi_ss_cra_exit;
		if (strncmp(sunxi_ss_algs[i].cra_name, "prng", 4) == 0)
			sunxi_ss_algs[i].cra_init = sunxi_ss_cra_rng_init;
		else
			sunxi_ss_algs[i].cra_init = sunxi_ss_cra_init;

		ret = crypto_register_alg(&sunxi_ss_algs[i]);
		if (ret != 0) {
			SS_ERR("crypto_register_alg(%s) failed! return %d \n",
				sunxi_ss_algs[i].cra_name, ret);
			return ret;
		}
	}

	for (i=0; i<ARRAY_SIZE(sunxi_ss_algs_hash); i++) {
		sunxi_ss_algs_hash[i].halg.base.cra_priority = 300;
		ret = crypto_register_ahash(&sunxi_ss_algs_hash[i]);
		if (ret != 0) {
			SS_ERR("crypto_register_ahash(%s) failed! return %d \n",
				sunxi_ss_algs_hash[i].halg.base.cra_name, ret);
			return ret;
		}
	}

	return 0;
}
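
A hedged sketch of how one of the ahash algorithms registered above could be driven from another kernel module. The algorithm name "sha1" and the synchronous wait wrapper are assumptions; the actual cra_name values live in sunxi_ss_algs_hash[], and since this driver completes requests asynchronously from its workqueue, the caller must wait on the request callback.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_wait {
	struct completion done;
	int err;
};

/* Completion callback eventually invoked via the driver's req->base.complete(). */
static void example_hash_complete(struct crypto_async_request *req, int err)
{
	struct example_wait *w = req->data;

	if (err == -EINPROGRESS)
		return;
	w->err = err;
	complete(&w->done);
}

/* Hash 'len' bytes at 'data' into 'out'; "sha1" is an assumed cra_name. */
static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_wait wait;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.done);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_hash_complete, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}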
Example No. 17
/* request dma channel and set callback function */
static int ss_dma_prepare(ss_dma_info_t *info)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	info->chan = dma_request_channel(mask, NULL, NULL);
	if (info->chan == NULL) {
		SS_ERR("Request DMA() failed!\n");
		return -EINVAL;
	}
	return 0;
}
Example No. 18
void ss_clk_set(u32 rate)
{
#ifdef CONFIG_EVB_PLATFORM
	int ret = 0;

	ret = clk_get_rate(ss_dev->mclk);
	if (ret == rate)
		return;

	SS_DBG("Change the SS clk to %d MHz. \n", rate/1000000);
	ret = clk_set_rate(ss_dev->mclk, rate);
	if (ret != 0)
		SS_ERR("clk_set_rate(%d) failed! return %d\n", rate, ret);
#endif
}
Example No. 19
int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();

	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n", flow,
			req_ctx->dir, req_ctx->type, req_ctx->mode, len, ctx->cnt);

	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret == 0) {
		ss_ctrl_start();
		ss_dma_start(&req_ctx->dma_src);

		ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
		if (ret == 0) {
			SS_ERR("Timed out\n");
			ss_reset();
			return -ETIMEDOUT;
		}

		ss_md_get(ctx->md, NULL, ctx->md_size);
	}

	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);

	ctx->cnt += len;
	return 0;
}
Example No. 20
static int ss_aes_start(ss_aes_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();

	ss_method_set(req_ctx->dir, req_ctx->type);
	ss_aes_mode_set(req_ctx->mode);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d \n", flow, req_ctx->dir,
			req_ctx->type, req_ctx->mode, len);

	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;
	ss_dma_prepare(&req_ctx->dma_dst);
	ss_dma_src_config(ss_dev, ctx, req_ctx, len, 0);
	ss_dma_dst_config(ss_dev, ctx, req_ctx, len, 1);

	ss_dma_start(&req_ctx->dma_dst);
	ss_ctrl_start();
	ss_dma_start(&req_ctx->dma_src);

	ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		return -ETIMEDOUT;
	}

	ss_ctrl_stop();
	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	ss_dma_release(ss_dev, &req_ctx->dma_dst);

	return 0;
}
Example No. 21
static int ss_flow_request(ss_comm_ctx_t *comm)
{
	int i;
	unsigned long flags = 0;

	spin_lock_irqsave(&ss_dev->lock, flags);
	for (i=0; i<SS_FLOW_NUM; i++) {
		if (ss_dev->flows[i].available == SS_FLOW_AVAILABLE) {
			comm->flow = i;
			ss_dev->flows[i].available = SS_FLOW_UNAVAILABLE;
			SS_DBG("The flow %d is available. \n", i);
			break;
		}
	}
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	if (i == SS_FLOW_NUM) {
		SS_ERR("Failed to get an available flow. \n");
		i = -1;
	}
	return i;
}
Example No. 22
int ss_aes_crypt(struct ablkcipher_request *req, int dir, int method, int mode)
{
	int err = 0;
	unsigned long flags = 0;
	ss_aes_req_ctx_t *req_ctx = ablkcipher_request_ctx(req);

	SS_DBG("nbytes: %d, dec: %d, method: %d, mode: %d\n", req->nbytes, dir, method, mode);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	req_ctx->dir  = dir;
	req_ctx->type = method;
	req_ctx->mode = mode;
	req->base.flags |= SS_FLAG_AES;

	spin_lock_irqsave(&ss_dev->lock, flags);
	err = ablkcipher_enqueue_request(&ss_dev->queue, req);
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	queue_work(ss_dev->workqueue, &ss_dev->work);
	return err;
}
Example No. 23
static int ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 
				unsigned int keylen)
{
	int ret = 0;
	ss_aes_ctx_t *ctx = crypto_ablkcipher_ctx(tfm);

	SS_DBG("keylen = %d\n", keylen);
	if (ctx->comm.flags & SS_FLAG_NEW_KEY) {
		SS_ERR("The key has already update.\n");
		return -EBUSY;
	}

	ret = ss_aes_key_valid(tfm, keylen);
	if (ret != 0)
		return ret;

	ctx->key_size = keylen;
	memcpy(ctx->key, key, keylen);
	if (keylen < AES_KEYSIZE_256)
		memset(&ctx->key[keylen], 0, AES_KEYSIZE_256 - keylen);

	ctx->comm.flags |= SS_FLAG_NEW_KEY;
	return 0;
}
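
A hedged caller-side sketch of the path by which a setkey handler like ss_aes_setkey() above is presumably reached: the legacy ablkcipher API that this driver is written against. The algorithm name "cbc(aes)" is an assumption (the registered cra_name values are defined in sunxi_ss_algs[]), and the asynchronous (-EINPROGRESS) completion handling is omitted for brevity; a real caller would wait as in the ahash sketch after Example No. 16.

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch: set a key and encrypt one buffer in place through the
 * legacy ablkcipher interface. "cbc(aes)" is an assumed algorithm name.
 */
static int example_aes_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen); /* routed to the driver's setkey */
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req); /* may return -EINPROGRESS for async drivers */

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}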
Example No. 24
int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;

	SS_ENTER();
	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key reloaded on each decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(req->info, crypto_ablkcipher_ivsize(tfm));
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm));
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));

		SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
	}
#endif

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif

	ctx->cnt += req->nbytes;
	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}

irqreturn_t sunxi_ss_irq_handler(int irq, void *dev_id)
{
	sunxi_ss_t *sss = (sunxi_ss_t *)dev_id;
	unsigned long flags = 0;
	int pending = 0;

	spin_lock_irqsave(&sss->lock, flags);

	pending = ss_pending_get();
	SS_DBG("SS pending %#x\n", pending);
	spin_unlock_irqrestore(&sss->lock, flags);

	return IRQ_HANDLED;
}
Example No. 25
static int ss_aes_start(ss_aes_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);

	ss_method_set(req_ctx->dir, req_ctx->type);
	ss_aes_mode_set(req_ctx->mode);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d \n", flow, req_ctx->dir,
			req_ctx->type, req_ctx->mode, len);

	init_completion(&req_ctx->done);

	/* 1. Copy data from user space to sss->flows[flow].buf_src. */
	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;
#ifdef SS_CTR_MODE_ENABLE
	if ((req_ctx->mode == SS_AES_MODE_CTR) && ((len%AES_BLOCK_SIZE) != 0))
		memset(&ss_dev->flows[flow].buf_src[len], 0, AES_BLOCK_SIZE);
#endif
	ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	ss_dma_start(&req_ctx->dma_src);
	ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		return -ETIMEDOUT;
	}

	/* 2. Start the SS. */
	ss_data_src_set(ss_dev->flows[flow].buf_src_dma);
	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
	SS_DBG("ss_dev->buf_dst_dma = %#x\n", ss_dev->flows[flow].buf_dst_dma);
#ifdef SS_CTS_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTS) {
		ss_data_len_set(len);
		if (len < SZ_4K) /* A bad way to determine the last packet of CTS mode. */
			ss_cts_last();
	}
	else
#endif
	ss_data_len_set(DIV_ROUND_UP(len, AES_BLOCK_SIZE)*4);

	ss_ctrl_start();

	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done, msecs_to_jiffies(SS_WAIT_TIME*50));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		return -ETIMEDOUT;
	}

	/* 3. Copy the result from sss->flows[flow].buf_dst to user space. */
	if (ss_dma_prepare(&req_ctx->dma_dst))
		return -EBUSY;
	ss_dma_dst_config(ss_dev, ctx, req_ctx, len, 1);
	ss_dma_start(&req_ctx->dma_dst);
	ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		return -ETIMEDOUT;
	}

	ss_ctrl_stop();
	ss_irq_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	ss_dma_release(ss_dev, &req_ctx->dma_dst);

	return 0;
}
Example No. 26
/* Request the resources: IRQ, mem */
static int __devinit sunxi_ss_res_request(struct platform_device *pdev)
{
	int irq = 0;
	int ret = 0;
	struct resource	*mem_res = NULL;
	sunxi_ss_t *sss = platform_get_drvdata(pdev);

#ifdef SS_IDMA_ENABLE
	int i;
	for (i=0; i<SS_FLOW_NUM; i++) {
		sss->flows[i].buf_src = (char *)kmalloc(SS_DMA_BUF_SIZE, GFP_KERNEL);
		if (sss->flows[i].buf_src == NULL) {
			SS_ERR("Can not allocate DMA source buffer\n");
			return -ENOMEM;
		}
		sss->flows[i].buf_src_dma = virt_to_phys(sss->flows[i].buf_src);

		sss->flows[i].buf_dst = (char *)kmalloc(SS_DMA_BUF_SIZE, GFP_KERNEL);
		if (sss->flows[i].buf_dst == NULL) {
			SS_ERR("Can not allocate DMA source buffer\n");
			return -ENOMEM;
		}
		sss->flows[i].buf_dst_dma = virt_to_phys(sss->flows[i].buf_dst);
		init_completion(&sss->flows[i].done);
	}
#endif

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		SS_ERR("Unable to get SS MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		SS_ERR("No SS IRQ specified\n");
		return irq;
	}

	sss->irq = irq;

	ret = request_irq(sss->irq, sunxi_ss_irq_handler, IRQF_DISABLED, sss->dev_name, sss);
	if (ret != 0) {
		SS_ERR("Cannot request IRQ\n");
		return ret;
	}

	if (request_mem_region(mem_res->start,
			resource_size(mem_res), pdev->name) == NULL) {
		SS_ERR("Req mem region failed\n");
		return -ENXIO;
	}

	sss->base_addr = ioremap(mem_res->start, resource_size(mem_res));
	if (sss->base_addr == NULL) {
		SS_ERR("Unable to remap IO\n");
		return -ENXIO;
	}
	sss->base_addr_phy = mem_res->start;

	return 0;
}
Example No. 27
static int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;
	int md_map_flag = 0;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);

	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n", flow,
			req_ctx->dir, req_ctx->type, req_ctx->mode, len, ctx->cnt);

	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret == 0) {
		/* 1. Copy data from user space to sss->flows[flow].buf_src. */
		ss_dma_start(&req_ctx->dma_src);
		ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
		if (ret == 0) {
			SS_ERR("Timed out\n");
			return -ETIMEDOUT;
		}

		/* 2. Start the SS. */
		ss_data_src_set(ss_dev->flows[flow].buf_src_dma);
		ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
		SS_DBG("ss_dev->buf_dst_dma = %#x\n", ss_dev->flows[flow].buf_dst_dma);
		ss_data_len_set((len - len%SHA1_BLOCK_SIZE)/4);

#ifdef SS_SHA_SWAP_MID_ENABLE
		if (req_ctx->type != SS_METHOD_MD5)
			ss_hash_swap(ctx->md, ctx->md_size);
#endif

		dma_map_single(&ss_dev->pdev->dev, ctx->md, ctx->md_size, DMA_MEM_TO_DEV);
		md_map_flag = 1;

		SS_DBG("Before SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
		dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst, ctx->md_size, DMA_DEV_TO_MEM);
		ss_ctrl_start();

		ret = wait_for_completion_timeout(&ss_dev->flows[flow].done, msecs_to_jiffies(SS_WAIT_TIME));
		if (ret == 0) {
			SS_ERR("Timed out\n");
			ss_reset();
			return -ETIMEDOUT;
		}
		SS_DBG("After SS, CTRL: 0x%08x \n", ss_reg_rd(SS_REG_CTL));
		SS_DBG("After SS, dst data: \n");
		print_hex(ss_dev->flows[flow].buf_dst, 32, (int)ss_dev->flows[flow].buf_dst);

		/* 3. Copy the MD from sss->buf_dst to ctx->md. */
		memcpy(ctx->md, ss_dev->flows[flow].buf_dst, ctx->md_size);
	}

	ss_ctrl_stop();
	ss_irq_disable(flow);
	if (md_map_flag == 1) {
		dma_unmap_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst_dma, ctx->md_size, DMA_DEV_TO_MEM);
		dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->md), ctx->md_size, DMA_MEM_TO_DEV);
	}
	ss_dma_release(ss_dev, &req_ctx->dma_src);

	ctx->cnt += len;
	return 0;
}
Example No. 28
static int ss_aes_one_req(sunxi_ss_t *sss, struct ablkcipher_request *req)
{
	int ret = 0;
	struct crypto_ablkcipher *tfm = NULL;
	ss_aes_ctx_t *ctx = NULL;
	ss_aes_req_ctx_t *req_ctx = NULL;
	int key_map_flag = 0;
	int iv_map_flag = 0;

	SS_ENTER();
	if (!req->src || !req->dst) {
		SS_ERR("Invalid sg: src = %p, dst = %p\n", req->src, req->dst);
		return -EINVAL;
	}

	ss_dev_lock();

	tfm = crypto_ablkcipher_reqtfm(req);
	req_ctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	/* The A31 SS needs the key reloaded on each decryption cycle. */
	if ((ctx->comm.flags & SS_FLAG_NEW_KEY) || (req_ctx->dir == SS_DIR_DECRYPT)) {
		SS_DBG("KEY address = %p, size = %d\n", ctx->key, ctx->key_size);
		ss_key_set(ctx->key, ctx->key_size);
		dma_map_single(&sss->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV);
		key_map_flag = 1;
		ctx->comm.flags &= ~SS_FLAG_NEW_KEY;
	}

#ifdef SS_CTS_MODE_ENABLE
	if (((req_ctx->mode == SS_AES_MODE_CBC)
			|| (req_ctx->mode == SS_AES_MODE_CTS)) && (req->info != NULL)) {
#else
	if ((req_ctx->mode == SS_AES_MODE_CBC) && (req->info != NULL)) {
#endif
		SS_DBG("IV address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm));
		memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));
		ss_iv_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		dma_map_single(&sss->pdev->dev, ctx->iv, crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);
		iv_map_flag = 1;
	}

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		SS_DBG("Cnt address = %p, size = %d\n", req->info, crypto_ablkcipher_ivsize(tfm));
		if (ctx->cnt == 0)
			memcpy(ctx->iv, req->info, crypto_ablkcipher_ivsize(tfm));

		SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
		ss_cnt_set(ctx->iv, crypto_ablkcipher_ivsize(tfm));
		dma_map_single(&sss->pdev->dev, ctx->iv, crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);
		iv_map_flag = 1;
	}
#endif

	if (req_ctx->type == SS_METHOD_RSA)
		ss_rsa_width_set(crypto_ablkcipher_ivsize(tfm));

	req_ctx->dma_src.sg = req->src;
	req_ctx->dma_dst.sg = req->dst;

	ret = ss_aes_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_aes_start fail(%d)\n", ret);

	ss_dev_unlock();
	if (req->base.complete)
		req->base.complete(&req->base, ret);

	if (key_map_flag == 1)
		dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key), ctx->key_size, DMA_MEM_TO_DEV);
	if (iv_map_flag == 1)
		dma_unmap_single(&sss->pdev->dev, virt_to_phys(ctx->iv), crypto_ablkcipher_ivsize(tfm), DMA_MEM_TO_DEV);

#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR) {
		ss_cnt_get(ctx->comm.flow, ctx->iv, crypto_ablkcipher_ivsize(tfm));
		SS_DBG("CNT: %08x %08x %08x %08x \n", *(int *)&ctx->iv[0],
			*(int *)&ctx->iv[4], *(int *)&ctx->iv[8], *(int *)&ctx->iv[12]);
	}
#endif
	ctx->cnt += req->nbytes;
	return ret;
}

static int ss_hash_one_req(sunxi_ss_t *sss, struct ahash_request *req)
{
	int ret = 0;
	ss_aes_req_ctx_t *req_ctx = NULL;
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	SS_ENTER();
	if (!req->src) {
		SS_ERR("Invalid sg: src = %p\n", req->src);
		return -EINVAL;
	}

	ss_dev_lock();

	req_ctx = ahash_request_ctx(req);
	req_ctx->dma_src.sg = req->src;

	ss_hash_padding_data_prepare(ctx, req->result, req->nbytes);

	ret = ss_hash_start(ctx, req_ctx, req->nbytes);
	if (ret < 0)
		SS_ERR("ss_hash_start fail(%d)\n", ret);

	ss_dev_unlock();

	if (req->base.complete)
		req->base.complete(&req->base, ret);

	return ret;
}