Example #1
static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}
Example #2
File: qat_algs.c  Project: volk3/CS736
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
Example #3
static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (rc < 0) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return rc;
	}

	return 0;
}
Example #4
static int omap_crypto_copy_sg_lists(int total, int bs,
				     struct scatterlist **sg,
				     struct scatterlist *new_sg, u16 flags)
{
	int n = sg_nents(*sg);
	struct scatterlist *tmp;

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
		if (!new_sg)
			return -ENOMEM;

		sg_init_table(new_sg, n);
	}

	tmp = new_sg;

	while (*sg && total) {
		int len = (*sg)->length;

		if (total < len)
			len = total;

		if (len > 0) {
			total -= len;
			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
			if (total <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
		}

		*sg = sg_next(*sg);
	}

	*sg = new_sg;

	return 0;
}
Example #5
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq,
		__u8 op)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (op) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid doing DMA from the stack: use a dynamically
	 * allocated buffer for the IV instead.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
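The stack-DMA comment in the function above describes a general pattern: duplicate the caller's IV into heap memory before attaching it to the virtqueue. Below is a minimal sketch of that step only, assuming nothing beyond ivsize bytes at req->info needing to be copied; the example_dup_iv() name is hypothetical, and kmemdup() stands in for the kzalloc_node()+memcpy() pair used by the driver.

/* Sketch: duplicate the request IV into heap memory that is safe to map. */
static u8 *example_dup_iv(struct ablkcipher_request *req, unsigned int ivsize)
{
	/* req->info may point into the caller's stack; never hand that to DMA */
	return kmemdup(req->info, ivsize, GFP_ATOMIC);
}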
Example #6
static int rk_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = NULL;
	unsigned long flags;
	int ret;

	if (!req->nbytes)
		return zero_message_process(req);

	dev = tctx->dev;
	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->mode = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->nents = sg_nents(req->src);

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->crypto_tasklet);

	/*
	 * It takes some time to process the data after the last DMA
	 * transfer has finished.
	 *
	 * The wait depends on the length of that last chunk, so a fixed
	 * delay cannot be used here. Sleeping 10-50 us per iteration keeps
	 * this loop from being entered so often that it wastes CPU time,
	 * while still letting us respond quickly once the DMA completes.
	 */
	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
		usleep_range(10, 50);

	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
		      crypto_ahash_digestsize(tfm));

	return 0;
}
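The polling comment above can also be expressed with the readl_poll_timeout() helper from <linux/iopoll.h>. This is only a sketch under the assumption that RK_CRYPTO_HASH_STS can be read as a plain MMIO register offset from dev->reg (the driver wraps the access in its own CRYPTO_READ() macro); the 100 ms timeout and the example_* name are made up for illustration.

#include <linux/iopoll.h>

/*
 * Sketch: wait for the hash-done status register to become non-zero,
 * sleeping up to 50 us between reads; returns 0 on success or
 * -ETIMEDOUT after 100 ms.
 */
static int example_wait_hash_done(struct rk_crypto_info *dev)
{
	u32 sts;

	return readl_poll_timeout(dev->reg + RK_CRYPTO_HASH_STS, sts,
				  sts != 0, 50, 100 * USEC_PER_MSEC);
}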
Example #7
/*
 * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
 *			     data from the sgl
 *
 * This function works in the same way as mpi_read_raw_data(), but takes an
 * sgl instead of a void * buffer, i.e. it allocates a new MPI and reads the
 * content of the sgl into it.
 *
 * @sgl:	scatterlist to read from
 * @len:	number of bytes to read
 *
 * Return:	Pointer to a new MPI or NULL on error
 */
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
{
	struct scatterlist *sg;
	int x, i, j, z, lzeros, ents;
	unsigned int nbits, nlimbs, nbytes;
	mpi_limb_t a;
	MPI val = NULL;

	lzeros = 0;
	ents = sg_nents(sgl);

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buff = sg_virt(sg);
		int len = sg->length;

		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		ents--;
		lzeros = 0;
	}

	sgl = sg;

	if (!ents)
		nbytes = 0;
	else
		nbytes = len - lzeros;

	nbits = nbytes * 8;
	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		return NULL;
	}

	if (nbytes > 0)
		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
	else
		nbits = 0;

	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
	val = mpi_alloc(nlimbs);
	if (!val)
		return NULL;

	val->nbits = nbits;
	val->sign = 0;
	val->nlimbs = nlimbs;

	if (nbytes == 0)
		return val;

	j = nlimbs - 1;
	a = 0;
	z = 0;
	x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	x %= BYTES_PER_MPI_LIMB;

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buffer = sg_virt(sg) + lzeros;
		int len = sg->length - lzeros;
		int buf_shift = x;

		if  (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
			len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);

		for (; x < len + buf_shift; x++) {
			a <<= 8;
			a |= *buffer++;
			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
				val->d[j--] = a;
				a = 0;
			}
		}
		z += x;
		x = 0;
		lzeros = 0;
	}
	return val;
}
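As a usage illustration of the helper documented above: wrap a kernel buffer in a one-entry scatterlist and parse it into an MPI. This is a minimal sketch; the 32-byte buffer, its random contents, and the example_* name are made up.

static MPI example_sgl_to_mpi(void)
{
	struct scatterlist sg;
	u8 *raw;
	MPI m;

	raw = kmalloc(32, GFP_KERNEL);
	if (!raw)
		return NULL;
	get_random_bytes(raw, 32);		/* stand-in for real big-endian input */

	sg_init_one(&sg, raw, 32);		/* single-entry sgl over the buffer */
	m = mpi_read_raw_from_sgl(&sg, 32);	/* returns NULL on failure */

	kfree(raw);
	return m;
}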
Example #8
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
Example #9
/* Generic function that supports SGs whose sizes are not multiples of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	char buf[4 * SS_RX_MAX];  /* buffer for linearizing the SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If every SG entry has a length that is a multiple of 4,
	 * we can use the optimized SS function instead.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them into buf; todo is in bytes here.
				 * Once a multiple of 4 bytes has accumulated,
				 * all of buf must be written out in one pass,
				 * which is why we also min() against rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo in 4bytes word */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as the
			 * device offers so that its FIFO is drained.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * what remains in the current SG entry, and no
				 * more than what remains in bufo; there is no
				 * need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
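The fallback decision commented near the top of sun4i_ss_cipher_poll() can be read as a standalone predicate over a scatterlist. A minimal sketch follows, with a hypothetical helper name.

/* Sketch: true when every entry in the scatterlist has a 4-byte-multiple length */
static bool example_sg_lengths_word_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (sg->length % 4)
			return false;
		sg = sg_next(sg);
	}
	return true;
}

With such a helper, the driver would take the sun4i_ss_opti_poll() fast path only when both example_sg_lengths_word_aligned(areq->src) and example_sg_lengths_word_aligned(areq->dst) hold.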