Example #1
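/*
 * Create a kernel TCP socket for connection descriptor @descidx, register
 * the socket callbacks and start a connect to kclient_server_address.
 * Failures are flagged on the descriptor and counted in kclient_connect_nerror;
 * started connects are counted in kclient_connect_nattempt.
 */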
static int
kclient_connect(int descidx)
{
	int ret;
	struct sock *sk;
	kclient_desc_t *desc = *(kclient_desc + descidx / KCLIENT_NCONNECTS)
					      + descidx % KCLIENT_NCONNECTS;

	ret = ss_sock_create(kclient_server_address.sa.sa_family,
			     SOCK_STREAM, IPPROTO_TCP, &sk);
	if (ret) {
		SS_DBG("Unable to create kernel socket (%d)\n", ret);
		desc->flags |= KCLIENT_CONNECT_ERROR;
		atomic_inc(&kclient_connect_nerror);
		return ret;
	}
	ss_proto_init(&desc->proto, &kclient_hooks, descidx);
	rcu_assign_sk_user_data(sk, &desc->proto);
	ss_set_callbacks(sk);
	ret = ss_connect(sk, &kclient_server_address.sa,
			 tfw_addr_sa_len(&kclient_server_address), 0);
	if (ret) {
		SS_DBG("Connect error on server socket sk %p (%d)\n", sk, ret);
		ss_release(sk);
		desc->flags |= KCLIENT_CONNECT_ERROR;
		atomic_inc(&kclient_connect_nerror);
		return ret;
	}
	desc->sk = sk;
	desc->flags |= KCLIENT_CONNECT_STARTED;
	atomic_inc(&kclient_connect_nattempt);
	return 0;
}
Example #2
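/*
 * Workqueue handler: drain the device's crypto queue under the lock and
 * dispatch each dequeued request to the AES or hash engine until the queue
 * is empty or a request returns an error.
 */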
void sunxi_ss_work(struct work_struct *work)
{
	int ret = 0;
	unsigned long flags = 0;
	sunxi_ss_t *sss = container_of(work, sunxi_ss_t, work);
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;

	/* empty the crypto queue and then return */
	do {
		spin_lock_irqsave(&sss->lock, flags);
		backlog = crypto_get_backlog(&sss->queue);
		async_req = crypto_dequeue_request(&sss->queue);
		spin_unlock_irqrestore(&sss->lock, flags);

		if (!async_req) {
			SS_DBG("async_req is NULL! \n");
			break;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		SS_DBG("async_req->flags = %#x \n", async_req->flags);
		if (async_req->flags & SS_FLAG_AES)
			ret = ss_aes_one_req(sss, ablkcipher_request_cast(async_req));
		else if (async_req->flags & SS_FLAG_HASH)
			ret = ss_hash_one_req(sss, ahash_request_cast(async_req));
	} while (!ret);
}
Example #3
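/*
 * crypto_rng entry point: program the stored seed as the PRNG/TRNG key and
 * generate @dlen bytes of random data into @rdata. Returns the number of
 * bytes produced or a negative error code.
 */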
int ss_rng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	ss_aes_ctx_t *ctx = crypto_rng_ctx(tfm);

	SS_DBG("flow = %d, rdata = %p, len = %d \n", ctx->comm.flow, rdata, dlen);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	ss_dev_lock();

	/* Must set the seed addr in PRNG/TRNG. */
	ss_key_set(ctx->key, ctx->key_size);
	dma_map_single(&ss_dev->pdev->dev, ctx->key, ctx->key_size, DMA_MEM_TO_DEV);

	ret = ss_rng_start(ctx, rdata, dlen);
	ss_dev_unlock();

	SS_DBG("Get %d byte random. \n", ret);

	dma_unmap_single(&ss_dev->pdev->dev, virt_to_phys(ctx->key), ctx->key_size, DMA_MEM_TO_DEV);

	return ret;
}
Example #4
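/*
 * Set up the source DMA channel for a memory-to-memory copy of @len bytes
 * into the flow's bounce buffer and submit the descriptor. Returns 1 when a
 * hash request is too short for DMA, 0 on success, -1 on failure.
 */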
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int flow = ((ss_comm_ctx_t *)ctx)->flow;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_MEM;
	dma_conf.direction = info->dir;
#ifdef SS_CTR_MODE_ENABLE
	if (req_ctx->mode == SS_AES_MODE_CTR)
		dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else
#endif
	dma_conf.src_addr_width = dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	ss_sg_table_init(&info->sgt_for_cp, info->sg, len, sss->flows[flow].buf_src, sss->flows[flow].buf_src_dma);
	SS_DBG("chan: 0x%p, info->sgt_for_cp.sgl: 0x%p \n", info->chan, info->sgt_for_cp.sgl);

	info->nents = info->sgt_for_cp.nents;
	SS_DBG("flow: %d, sg num: %d, total len: %d \n", flow, info->nents, len);

	dma_map_sg(&sss->pdev->dev, info->sg, info->nents, info->dir);
	dma_map_sg(&sss->pdev->dev, info->sgt_for_cp.sgl, info->nents, info->dir);

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;

		ss_hash_padding_sg_prepare(&info->sg[info->nents-1], len);
		ss_hash_padding_sg_prepare(&info->sgt_for_cp.sgl[info->nents-1], len);
	}

	dma_desc = info->chan->device->device_prep_dma_sg(info->chan,
			info->sgt_for_cp.sgl, info->nents,
			info->sg, info->nents, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example #5
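/*
 * Platform probe: allocate the driver state, request resources, initialise
 * the hardware and clocks, set up the request queue and workqueue, and
 * register the crypto algorithms.
 */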
static int __devinit sunxi_ss_probe(struct platform_device *pdev)
{
	int ret = 0;
	sunxi_ss_t *sss = NULL;

	sss = devm_kzalloc(&pdev->dev, sizeof(sunxi_ss_t), GFP_KERNEL);
	if (sss == NULL) {
		SS_ERR("Unable to allocate sunxi_ss_t\n");
		return -ENOMEM;
	}

	snprintf(sss->dev_name, sizeof(sss->dev_name), SUNXI_SS_DEV_NAME);
	platform_set_drvdata(pdev, sss);

	ret = sunxi_ss_res_request(pdev);
	if (ret != 0) {
		goto err0;
	}

	sss->pdev = pdev;

	ret = sunxi_ss_hw_init(sss);
	if (ret != 0) {
		SS_ERR("SS hw init failed!\n");
		goto err1;
	}

	spin_lock_init(&sss->lock);
	INIT_WORK(&sss->work, sunxi_ss_work);
	crypto_init_queue(&sss->queue, 16);

	sss->workqueue = create_singlethread_workqueue(sss->dev_name);
	if (sss->workqueue == NULL) {
		SS_ERR("Unable to create workqueue\n");
		ret = -EPERM;
		goto err2;
	}

	ret = sunxi_ss_alg_register();
	if (ret != 0) {
		SS_ERR("sunxi_ss_alg_register() failed! return %d \n", ret);
		goto err3;
	}

	sunxi_ss_sysfs_create(pdev);

	ss_dev = sss;
	SS_DBG("SS driver probe succeed, base 0x%p, irq %d!\n", sss->base_addr, sss->irq);
	return 0;

err3:
	destroy_workqueue(sss->workqueue);
err2:
	sunxi_ss_hw_exit(sss);
err1:
	sunxi_ss_res_release(sss);
err0:
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example #6
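/*
 * ahash .update callback: mark the request as a hash operation, enqueue it
 * on the device queue and kick the workqueue; the hashing itself is done
 * asynchronously by sunxi_ss_work().
 */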
int ss_hash_update(struct ahash_request *req)
{
	int err = 0;
	unsigned long flags = 0;

	if (!req->nbytes) {
		SS_ERR("Invalid length: %d. \n", req->nbytes);
		return 0;
	}

	SS_DBG("Flags: %#x, len = %d \n", req->base.flags, req->nbytes);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	req->base.flags |= SS_FLAG_HASH;

	spin_lock_irqsave(&ss_dev->lock, flags);
	err = ahash_enqueue_request(&ss_dev->queue, req);
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	queue_work(ss_dev->workqueue, &ss_dev->work);
	return err;
}
Example #7
/* Callback of DMA completion. */
static void ss_dma_cb(void *data)
{
	ss_aes_req_ctx_t *req_ctx = (ss_aes_req_ctx_t *)data;

	SS_DBG("DMA transfer data complete!\n");

	complete(&req_ctx->done);
}
Example #8
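/*
 * ahash .final callback: hash the padding block produced by
 * ss_hash_padding(), wait for the engine to finish and copy the resulting
 * digest into @req->result.
 */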
int ss_hash_final(struct ahash_request *req)
{
	int pad_len = 0;
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct scatterlist last = {0}; /* make a sg struct for padding data. */

	if (req->result == NULL) {
		SS_ERR("Invalid result porinter. \n");
		return -EINVAL;
	}
	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	/* Process the padding data. */
	pad_len = ss_hash_padding(ctx, req_ctx->type == SS_METHOD_MD5 ? 0 : 1);
	SS_DBG("Pad len: %d \n", pad_len);
	req_ctx->dma_src.sg = &last;
	sg_init_table(&last, 1);
	sg_set_buf(&last, ctx->pad, pad_len);
	SS_DBG("Padding data: \n");
	print_hex(ctx->pad, 128, (int)ctx->pad);

	ss_dev_lock();
	ss_hash_start(ctx, req_ctx, pad_len);

	ss_sha_final();

	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);

	ss_check_sha_end();
	memcpy(req->result, ctx->md, ctx->md_size);
	ss_ctrl_stop();
	ss_dev_unlock();

#ifdef SS_SHA_SWAP_FINAL_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(req->result, ctx->md_size);
#endif

	return 0;
}
Example #9
static int sunxi_ss_cra_init(struct crypto_tfm *tfm)
{
	if (ss_flow_request(crypto_tfm_ctx(tfm)) < 0)
		return -1;

	tfm->crt_ablkcipher.reqsize = sizeof(ss_aes_req_ctx_t);
	SS_DBG("reqsize = %d \n", tfm->crt_u.ablkcipher.reqsize);
	return 0;
}
Example #10
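/*
 * Run one hash pass over @len bytes: load the current digest @ctx->md as the
 * IV, start the engine and the source DMA, wait for the DMA completion and
 * read the updated digest back into @ctx->md.
 */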
int ss_hash_start(ss_hash_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();

	ss_method_set(req_ctx->dir, req_ctx->type);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d / %d \n", flow,
			req_ctx->dir, req_ctx->type, req_ctx->mode, len, ctx->cnt);

	SS_DBG("IV address = 0x%p, size = %d\n", ctx->md, ctx->md_size);
	ss_iv_set(ctx->md, ctx->md_size);
	ss_iv_mode_set(SS_IV_MODE_ARBITRARY);

	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;

	ret = ss_dma_src_config(ss_dev, ctx, req_ctx, len, 1);
	if (ret == 0) {
		ss_ctrl_start();
		ss_dma_start(&req_ctx->dma_src);

		ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
		if (ret == 0) {
			SS_ERR("Timed out\n");
			ss_reset();
			return -ETIMEDOUT;
		}

		ss_md_get(ctx->md, NULL, ctx->md_size);
	}

	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);

	ctx->cnt += len;
	return 0;
}
Example #11
File: sock.c Project: chixsh/tempesta
/**
 * @skb_list can be invalid after the function call, don't try to use it.
 */
static void
ss_do_send(struct sock *sk, SsSkbList *skb_list)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int size, mss = tcp_send_mss(sk, &size, MSG_DONTWAIT);

	SS_DBG("%s: cpu=%d sk=%p queue_empty=%d send_head=%p"
	       " sk_state=%d mss=%d size=%d\n", __func__,
	       smp_processor_id(), sk, tcp_write_queue_empty(sk),
	       tcp_send_head(sk), sk->sk_state, mss, size);

	if (unlikely(!ss_sock_active(sk)))
		return;
	ss_sock_cpu_check(sk);

	while ((skb = ss_skb_dequeue(skb_list))) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_shinfo(skb)->gso_segs = 0;

		/*
		 * TODO Mark all data with PUSH to force receiver to consume
		 * the data. Currently we do this for debugging purposes.
		 * We need to do this only for complete messages/skbs.
		 * Actually tcp_push() already does it for the last skb.
		 * MSG_MORE should be used, probably by connection layer.
		 */
		tcp_mark_push(tp, skb);

		SS_DBG("%s: entail skb=%p data_len=%u len=%u\n",
		       __func__, skb, skb->data_len, skb->len);

		ss_skb_entail(sk, skb);

		tp->write_seq += skb->len;
		TCP_SKB_CB(skb)->end_seq += skb->len;
	}

	SS_DBG("%s: sk=%p send_head=%p sk_state=%d\n", __func__,
	       sk, tcp_send_head(sk), sk->sk_state);

	tcp_push(sk, MSG_DONTWAIT, mss, TCP_NAGLE_OFF|TCP_NAGLE_PUSH, size);
}
Example #12
File: sock.c Project: chixsh/tempesta
/**
 * Directly insert all skbs from @skb_list into the @sk TCP write queue,
 * regardless of the write buffer size. This allows forwarding modified
 * packets directly, without copying. See do_tcp_sendpages() and
 * tcp_sendmsg() in linux/net/ipv4/tcp.c.
 *
 * Can be called in softirq context as well as from kernel thread.
 *
 * TODO use MSG_MORE until we reach the end of the message.
 */
int
ss_send(struct sock *sk, SsSkbList *skb_list, bool pass_skb)
{
	int r = 0;
	struct sk_buff *skb, *skb_copy;
	SsWork sw = {
		.sk	= sk,
		.action	= SS_SEND,
	};

	BUG_ON(!sk);
	BUG_ON(ss_skb_queue_empty(skb_list));
	SS_DBG("%s: cpu=%d sk=%p (cpu=%d) state=%s\n", __func__,
	       smp_processor_id(), sk, sk->sk_incoming_cpu,
	       ss_statename[sk->sk_state]);

	/*
	 * Remove the skbs from Tempesta lists if we won't use them,
	 * or copy them if they're going to be used by Tempesta during
	 * and after the transmission.
	 */
	if (pass_skb) {
		sw.skb_list = *skb_list;
		ss_skb_queue_head_init(skb_list);
	} else {
		ss_skb_queue_head_init(&sw.skb_list);
		for (skb = ss_skb_peek(skb_list); skb; skb = ss_skb_next(skb)) {
			/* tcp_transmit_skb() will clone the skb. */
			skb_copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!skb_copy) {
				SS_WARN("Unable to copy an egress SKB.\n");
				r = -ENOMEM;
				goto err;
			}
			ss_skb_queue_tail(&sw.skb_list, skb_copy);
		}
	}

	/*
	 * Schedule the socket for TX softirq processing.
	 * Only part of @skb_list could be passed to send queue.
	 */
	if (ss_wq_push(&sw)) {
		SS_WARN("Cannot schedule socket %p for transmission\n", sk);
		r = -EBUSY;
		goto err;
	}

	return 0;
err:
	if (!pass_skb)
		while ((skb = ss_skb_dequeue(&sw.skb_list)))
			kfree_skb(skb);
	return r;
}
Example #13
int ss_rng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
	ss_aes_ctx_t *ctx = crypto_rng_ctx(tfm);

	SS_DBG("Seed len: %d, ctx->flags = %#x \n", slen, ctx->comm.flags);
	ctx->key_size = slen;
	memcpy(ctx->key, seed, slen);
	ctx->comm.flags |= SS_FLAG_NEW_KEY;

	return 0;
}
Example #14
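/*
 * Connect thread: initiate KCLIENT_NCONNECTS connections for this thread's
 * descriptor range, then drop the thread reference and wake up the waiter
 * on kclient_connect_wq.
 */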
static int
kclient_thread_connect(void *data)
{
	int i, nconnects = 0;
	int threadn = (int)(long)data;
	int descidx = threadn * KCLIENT_NCONNECTS;

	SS_DBG("connect_thread_%02d started\n", threadn);
	for (i = 0; i < KCLIENT_NCONNECTS; i++) {
		if (kclient_connect(descidx + i) == 0) {
			nconnects++;
		}
	}
	kclient_connect_task[threadn] = NULL;
	atomic_dec(&kclient_nthreads);
	wake_up(&kclient_connect_wq);
	SS_DBG("Thread %d has initiated %d connects out of %d\n",
	       threadn, nconnects, KCLIENT_NCONNECTS);
	return 0;
}
Example #15
static int sunxi_ss_cra_hash_init(struct crypto_tfm *tfm)
{
	if (ss_flow_request(crypto_tfm_ctx(tfm)) < 0)
		return -1;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(ss_aes_req_ctx_t));

	SS_DBG("reqsize = %d \n", sizeof(ss_aes_req_ctx_t));
	return 0;
}
Example #16
/* ctx - only used for HASH. */
static int ss_dma_src_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_src;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_MEM_TO_DEV;
	dma_conf.direction = info->dir;
	dma_conf.dst_addr = sss->base_addr_phy + SS_REG_RXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SS, DRQSRC_SDRAM);
	dmaengine_slave_config(info->chan, &dma_conf);

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n", npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}

	info->nents = nents;

	if (SS_METHOD_IS_HASH(req_ctx->type)) {
		ss_hash_padding_sg_prepare(&info->sg[nents-1], len);

		/* Total len is too small, so there is no data for DMA. */
		if (len < SHA1_BLOCK_SIZE)
			return 1;
	}

	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
				info->dir,	DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example #17
int ss_rng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
{
	int len = slen > SS_PRNG_SEED_LEN ? SS_PRNG_SEED_LEN : slen;
	ss_aes_ctx_t *ctx = crypto_rng_ctx(tfm);

	SS_DBG("Seed len: %d/%d, ctx->flags = %#x \n", len, slen, ctx->comm.flags);
	ctx->key_size = len;
	memset(ctx->key, 0, SS_PRNG_SEED_LEN);
	memcpy(ctx->key, seed, len);
	ctx->comm.flags |= SS_FLAG_NEW_KEY;

	return 0;
}
Example #18
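/*
 * Generate @dlen bytes of random data into the flow's DMA buffer using the
 * PRNG (or TRNG when enabled) and copy them to @rdata. Returns @dlen on
 * success or -ETIMEDOUT if the engine does not finish in time.
 */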
static int ss_rng_start(ss_aes_ctx_t *ctx, u8 *rdata, unsigned int dlen)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_irq_enable(flow);
	ss_flow_enable(flow);

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG) {
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_TRNG);
		ss_trng_osc_enable();
	}
	else
#endif
		ss_method_set(SS_DIR_ENCRYPT, SS_METHOD_PRNG);

	ss_rng_mode_set(SS_RNG_MODE_CONTINUE);

	ss_data_dst_set(ss_dev->flows[flow].buf_dst_dma);
#ifdef SS_TRNG_ENABLE
	ss_data_len_set(DIV_ROUND_UP(dlen, 32)*(32>>2)); /* align with 32 Bytes */
#else
	ss_data_len_set(DIV_ROUND_UP(dlen, 20)*(20>>2)); /* align with 20 Bytes */
#endif
	SS_DBG("Flow: %d, Request: %d, Aligned: %d \n", flow, dlen, DIV_ROUND_UP(dlen, 20)*5);
	dma_map_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst, SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);

	ss_ctrl_start();
	ret = wait_for_completion_timeout(&ss_dev->flows[flow].done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		return -ETIMEDOUT;
	}

	memcpy(rdata, ss_dev->flows[flow].buf_dst, dlen);
	dma_unmap_single(&ss_dev->pdev->dev, ss_dev->flows[flow].buf_dst_dma, SS_DMA_BUF_SIZE, DMA_DEV_TO_MEM);
	ss_irq_disable(flow);
	ret = dlen;

#ifdef SS_TRNG_ENABLE
	if (ctx->comm.flags & SS_FLAG_TRNG)
		ss_trng_osc_disable();
#endif

	ss_ctrl_stop();
	return ret;
}
Example #19
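/*
 * Interrupt handler: acknowledge the pending flow interrupts and complete
 * the corresponding flow's completion so the waiting request can continue.
 */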
irqreturn_t sunxi_ss_irq_handler(int irq, void *dev_id)
{
	sunxi_ss_t *sss = (sunxi_ss_t *)dev_id;
	unsigned long flags = 0;
	int pending = 0;

	spin_lock_irqsave(&sss->lock, flags);

	pending = ss_pending_get();
	SS_DBG("SS pending %#x\n", pending);
	if (pending&SS_REG_ICR_FLOW0_PENDING_MASK) {
		ss_pending_clear(0);
		complete(&sss->flows[0].done);
	}
	if (pending&SS_REG_ICR_FLOW1_PENDING_MASK) {
		ss_pending_clear(1);
		complete(&sss->flows[1].done);
	}
	spin_unlock_irqrestore(&sss->lock, flags);
	SS_DBG("SS pending %#x, CTRL: 0x%08x\n", ss_pending_get(), ss_reg_rd(SS_REG_CTL));

	return IRQ_HANDLED;
}
Example #20
void ss_clk_set(u32 rate)
{
#ifdef CONFIG_EVB_PLATFORM
	int ret = 0;

	ret = clk_get_rate(ss_dev->mclk);
	if (ret == rate)
		return;

	SS_DBG("Change the SS clk to %d MHz. \n", rate/1000000);
	ret = clk_set_rate(ss_dev->mclk, rate);
	if (ret != 0)
		SS_ERR("clk_set_rate(%d) failed! return %d\n", rate, ret);
#endif
}
Example #21
static int ss_hash_init(struct ahash_request *req, int type, int size, char *iv)
{
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	SS_DBG("Method: %d \n", type);

	memset(req_ctx, 0, sizeof(ss_aes_req_ctx_t));
	req_ctx->type = type;

	ctx->md_size = size;
	memcpy(ctx->md, iv, size);

	ctx->cnt = 0;
	memset(ctx->pad, 0, SS_HASH_PAD_SIZE);
	return 0;
}
Example #22
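/*
 * Hardware init: acquire the module clock, reparent it to the PLL, set the
 * configured rate and enable it.
 */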
static int sunxi_ss_hw_init(sunxi_ss_t *sss)
{
#ifdef CONFIG_EVB_PLATFORM
	int ret = 0;
#endif
	struct clk *pclk = NULL;

	pclk = clk_get(&sss->pdev->dev, SS_PLL_CLK);
	if (IS_ERR_OR_NULL(pclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				SS_PLL_CLK, PTR_RET(pclk));
		return PTR_RET(pclk);
	}

	sss->mclk = clk_get(&sss->pdev->dev, sss->dev_name);
	if (IS_ERR_OR_NULL(sss->mclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				sss->dev_name, PTR_RET(sss->mclk));
		return PTR_RET(sss->mclk);
	}

#ifdef CONFIG_EVB_PLATFORM
	ret = clk_set_parent(sss->mclk, pclk);
	if (ret != 0) {
		SS_ERR("clk_set_parent() failed! return %d\n", ret);
		return ret;
	}

	ret = clk_set_rate(sss->mclk, SS_CLK_RATE);
	if (ret != 0) {
		SS_ERR("clk_set_rate(%d) failed! return %d\n", SS_CLK_RATE, ret);
		return ret;
	}
#endif
	SS_DBG("SS mclk %luMHz, pclk %luMHz\n", clk_get_rate(sss->mclk)/1000000,
			clk_get_rate(pclk)/1000000);

	if (clk_prepare_enable(sss->mclk)) {
		SS_ERR("Couldn't enable module clock\n");
		return -EBUSY;
	}

	clk_put(pclk);
	return 0;
}
Example #23
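/* Count how many scatterlist entries are needed to cover @total bytes. */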
static int ss_sg_cnt(struct scatterlist *sg, int total)
{
	int cnt = 0;
	int nbyte = 0;
	struct scatterlist *cur = sg;

	while (cur != NULL) {
		cnt++;
		SS_DBG("cnt = %d, cur = %p, len = %d \n", cnt, cur, sg_dma_len(cur));
		nbyte += sg_dma_len(cur);
		if (nbyte >= total)
			return cnt;

		cur = sg_next(cur);
	}

	return cnt;
}
Example #24
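/*
 * Set up the destination DMA channel to move data from the SS TX FIFO back
 * to memory and submit the prepared descriptor.
 */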
static int ss_dma_dst_config(sunxi_ss_t *sss, void *ctx, ss_aes_req_ctx_t *req_ctx, int len, int cb)
{
	int nents = 0;
	int npages = 0;
	ss_dma_info_t *info = &req_ctx->dma_dst;
	struct dma_slave_config dma_conf = {0};
	struct dma_async_tx_descriptor *dma_desc = NULL;

	info->dir = DMA_DEV_TO_MEM;
	dma_conf.direction = info->dir;
	dma_conf.src_addr = sss->base_addr_phy + SS_REG_TXFIFO;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = 1;
	dma_conf.dst_maxburst = 1;
	dma_conf.slave_id = sunxi_slave_id(DRQDST_SDRAM, DRQSRC_SS);
	dmaengine_slave_config(info->chan, &dma_conf);

	npages = ss_sg_cnt(info->sg, len);
	WARN_ON(npages == 0);

	nents = dma_map_sg(&sss->pdev->dev, info->sg, npages, info->dir);
	SS_DBG("npages = %d, nents = %d, len = %d, sg.len = %d \n", npages, nents, len, sg_dma_len(info->sg));
	if (!nents) {
		SS_ERR("dma_map_sg() error\n");
		return -EINVAL;
	}

	info->nents = nents;
	dma_desc = dmaengine_prep_slave_sg(info->chan, info->sg, nents,
				info->dir,	DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		SS_ERR("dmaengine_prep_slave_sg() failed!\n");
		return -1;
	}

	if (cb == 1) {
		dma_desc->callback = ss_dma_cb;
		dma_desc->callback_param = (void *)req_ctx;
	}
	dmaengine_submit(dma_desc);
	return 0;
}
Example #25
static int __init sunxi_ss_init(void)
{
	int ret = 0;

	SS_DBG("[%s %s]Sunxi SS init ... \n", __DATE__, __TIME__);

	ret = platform_driver_register(&sunxi_ss_driver);
	if (ret < 0) {
		SS_ERR("platform_driver_register() failed, return %d\n", ret);
		return ret;
	}

	ret = platform_device_register(&sunxi_ss_device);
	if (ret < 0) {
		SS_ERR("platform_device_register() failed, return %d\n", ret);
		return ret;
	}

	return ret;
}
Example #26
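/*
 * Run one AES request: program the method and mode, configure source and
 * destination DMA, start the engine and wait for the destination DMA
 * completion callback.
 */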
static int ss_aes_start(ss_aes_ctx_t *ctx, ss_aes_req_ctx_t *req_ctx, int len)
{
	int ret = 0;
	int flow = ctx->comm.flow;

	ss_pending_clear(flow);
	ss_dma_enable(flow);
	ss_fifo_init();

	ss_method_set(req_ctx->dir, req_ctx->type);
	ss_aes_mode_set(req_ctx->mode);

	SS_DBG("Flow: %d, Dir: %d, Method: %d, Mode: %d, len: %d \n", flow, req_ctx->dir,
			req_ctx->type, req_ctx->mode, len);

	init_completion(&req_ctx->done);

	if (ss_dma_prepare(&req_ctx->dma_src))
		return -EBUSY;
	ss_dma_prepare(&req_ctx->dma_dst);
	ss_dma_src_config(ss_dev, ctx, req_ctx, len, 0);
	ss_dma_dst_config(ss_dev, ctx, req_ctx, len, 1);

	ss_dma_start(&req_ctx->dma_dst);
	ss_ctrl_start();
	ss_dma_start(&req_ctx->dma_src);

	ret = wait_for_completion_timeout(&req_ctx->done, msecs_to_jiffies(SS_WAIT_TIME));
	if (ret == 0) {
		SS_ERR("Timed out\n");
		ss_reset();
		return -ETIMEDOUT;
	}

	ss_ctrl_stop();
	ss_dma_disable(flow);
	ss_dma_release(ss_dev, &req_ctx->dma_src);
	ss_dma_release(ss_dev, &req_ctx->dma_dst);

	return 0;
}
Example #27
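/*
 * Reserve a free hardware flow for the transform context. Returns the flow
 * index on success or -1 when no flow is available.
 */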
static int ss_flow_request(ss_comm_ctx_t *comm)
{
	int i;
	unsigned long flags = 0;

	spin_lock_irqsave(&ss_dev->lock, flags);
	for (i=0; i<SS_FLOW_NUM; i++) {
		if (ss_dev->flows[i].available == SS_FLOW_AVAILABLE) {
			comm->flow = i;
			ss_dev->flows[i].available = SS_FLOW_UNAVAILABLE;
			SS_DBG("The flow %d is available. \n", i);
			break;
		}
	}
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	if (i == SS_FLOW_NUM) {
		SS_ERR("Failed to get an available flow. \n");
		i = -1;
	}
	return i;
}
Example #28
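/*
 * Common entry for all AES-family requests: record direction, method and
 * mode in the request context, enqueue the request and schedule the
 * workqueue.
 */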
int ss_aes_crypt(struct ablkcipher_request *req, int dir, int method, int mode)
{
	int err = 0;
	unsigned long flags = 0;
	ss_aes_req_ctx_t *req_ctx = ablkcipher_request_ctx(req);

	SS_DBG("nbytes: %d, dec: %d, method: %d, mode: %d\n", req->nbytes, dir, method, mode);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	req_ctx->dir  = dir;
	req_ctx->type = method;
	req_ctx->mode = mode;
	req->base.flags |= SS_FLAG_AES;

	spin_lock_irqsave(&ss_dev->lock, flags);
	err = ablkcipher_enqueue_request(&ss_dev->queue, req);
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	queue_work(ss_dev->workqueue, &ss_dev->work);
	return err;
}
Example #29
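/*
 * ablkcipher .setkey callback: validate the key length, copy the key
 * (zero-padded up to the AES-256 size) and flag it so the engine reloads it
 * on the next request.
 */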
static int ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 
				unsigned int keylen)
{
	int ret = 0;
	ss_aes_ctx_t *ctx = crypto_ablkcipher_ctx(tfm);

	SS_DBG("keylen = %d\n", keylen);
	if (ctx->comm.flags & SS_FLAG_NEW_KEY) {
		SS_ERR("The key has already update.\n");
		return -EBUSY;
	}

	ret = ss_aes_key_valid(tfm, keylen);
	if (ret != 0)
		return ret;

	ctx->key_size = keylen;
	memcpy(ctx->key, key, keylen);
	if (keylen < AES_KEYSIZE_256)
		memset(&ctx->key[keylen], 0, AES_KEYSIZE_256 - keylen);

	ctx->comm.flags |= SS_FLAG_NEW_KEY;
	return 0;
}
Example #30
File: ss_skb.c Project: nekulin/tempesta
/**
 * Fragment @skb to add some room if @len > 0 or delete data otherwise.
 */
static int
__skb_fragment(struct sk_buff *skb, struct sk_buff *pskb,
	       char *pspt, int len, TfwStr *it)
{
	int i, ret;
	long offset;
	unsigned int d_size;
	struct sk_buff *f_skb, **next_fdp;

	SS_DBG("[%d]: %s: in: len [%d] pspt [%p], skb [%p]: head [%p]"
		" data [%p] tail [%p] end [%p] len [%u] data_len [%u]"
		" truesize [%u] nr_frags [%u]\n",
		smp_processor_id(), __func__, len, pspt, skb, skb->head,
		skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len, skb->data_len, skb->truesize,
		skb_shinfo(skb)->nr_frags);
	BUG_ON(!len);

	if (abs(len) > PAGE_SIZE) {
		SS_WARN("Attempt to add or delete too much data: %u\n", len);
		return -EINVAL;
	}

	/*
	 * Use @it to hold the return values from __split_pgfrag()
	 * and __split_linear_data(). @it->ptr, @it->skb, and
	 * @it->flags may be set to actual values. If a new SKB is
	 * allocated, then it is stored in @it->skb. @it->ptr holds
	 * the pointer either to data after the deleted data, or to
	 * the area for new data. @it->flags is set when @it->ptr
	 * points to data in @it->skb. Otherwise, @it->ptr points
	 * to data in @skb.
	 *
	 * Determine where the split begins within the SKB, then do
	 * the job using the right function.
	 */

	/* See if the split starts in the linear data. */
	d_size = skb_headlen(skb);
	offset = pspt - (char *)skb->data;

	if ((offset >= 0) && (offset < d_size)) {
		int t_size = d_size - offset;
		len = max(len, -t_size);
		ret = __split_linear_data(skb, pspt, len, it);
		goto done;
	}

	/* See if the split starts in the page fragments data. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		d_size = skb_frag_size(frag);
		offset = pspt - (char *)skb_frag_address(frag);

		if ((offset >= 0) && (offset < d_size)) {
			int t_size = d_size - offset;
			len = max(len, -t_size);
			ret = __split_pgfrag(skb, i, offset, len, it);
			goto done;
		}
	}

	/* See if the split starts in the SKB fragments data. */
	skb_walk_frags(skb, f_skb) {
		ret = __skb_fragment(f_skb, skb, pspt, len, it);
		if (ret != -ENOENT)
			return ret;
	}