Code Example #1
File: cc_buffer_mgr.c  Project: Anjali05/linux
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else {  /* !sg_is_last */
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
					  &is_chained);
		if (*nents > max_sg_nents) {
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (*mapped_nents == 0) {
				*nents = 0;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
						      direction);
			if (*mapped_nents != *nents) {
				*nents = *mapped_nents;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
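
For reference, a minimal caller-side sketch of the dma_map_sg()/for_each_sg() pattern that helpers such as cc_map_sg() wrap. This is an illustration only, assuming a caller that already owns an initialized scatterlist; the commented-out descriptor-programming step is hypothetical and not part of the driver above.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a scatterlist for the device, walk the mapped segments, then unmap. */
static int demo_map_and_walk(struct device *dev, struct scatterlist *sgl,
			     int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Iterate only the mapped segments; an IOMMU may have merged entries. */
	for_each_sg(sgl, sg, mapped, i) {
		/* program_descriptor(sg_dma_address(sg), sg_dma_len(sg)); */
	}

	/* Unmap with the original nents, not the mapped count. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}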
Code Example #2
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
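
As a usage illustration (not taken from any of the projects above), the sketch below walks a possibly chained list with sg_next(). Since sg_next() returns NULL at the entry flagged by sg_is_last() and transparently follows chain entries, the loop needs no special handling.

#include <linux/scatterlist.h>

/* Sum the byte lengths of every data entry in a (possibly chained) list. */
static unsigned int demo_sgl_bytes(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}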
Code Example #3
static void s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int err;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		err = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (err) {
			s5p_aes_complete(dev, err);
			return;
		}

		s5p_set_dma_indata(dev, dev->sg_src);
	}
}
Code Example #4
static void s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int err = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (err) {
			s5p_aes_complete(dev, err);
			return;
		}

		s5p_set_dma_outdata(dev, dev->sg_dst);
	} else
		s5p_aes_complete(dev, err);
}
Code Example #5
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
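
sg_is_last() and sg_last() only behave as documented on a properly terminated table. The sketch below, with hypothetical buffers, shows the usual setup: sg_init_table() zeroes the entries and marks the final slot as the end of the list, which is exactly what the two helpers rely on.

#include <linux/scatterlist.h>

/* Build a two-entry table from two hypothetical kernel buffers. */
static void demo_build_sgl(struct scatterlist sg[2],
			   void *buf0, unsigned int len0,
			   void *buf1, unsigned int len1)
{
	sg_init_table(sg, 2);		/* zero entries, mark sg[1] as last */
	sg_set_buf(&sg[0], buf0, len0);
	sg_set_buf(&sg[1], buf1, len1);
}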
Code Example #6
File: tdma.c  Project: 020gzh/linux
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}
Code Example #7
static int dx_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction,
		     uint32_t *nents, uint32_t max_sg_nents,
		     int *lbytes)
{
	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if ( unlikely( dma_map_sg(dev, sg, 1, direction) != 1 ) ) {
			DX_LOG_ERR("dma_map_sg() single buffer failed %s\n ",
				   get_dir_type(direction));
			return -ENOMEM;
		}
		DX_LOG_DEBUG("Mapped sg: dma_address=0x%08lX "
			     "page_link=0x%08lX addr=0x%08lX offset=%u "
			     "length=%u\n",
			     (unsigned long)sg_dma_address(sg),
			     sg->page_link,
			     (unsigned long)sg_virt(sg),
			     sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
	} else {  /* !sg_is_last */

		*nents = sg_count_ents(sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			DX_LOG_ERR("Too many fragments. current %d max %d\n",
				   *nents, max_sg_nents);
			return -ENOMEM;
		}
		/* TODO - verify num of entries */
		if ( unlikely( dma_map_sg(dev, sg, *nents, direction)
			      != *nents ) ) {
			DX_LOG_ERR("dma_map_sg() sg buffer failed - %s\n",
				   get_dir_type(direction));
			return -ENOMEM;
		}
	}

	return 0;
}
Code Example #8
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;

	dev->unload_data(dev);
	if (dev->left_bytes) {
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
		}
		err = rk_ahash_set_data_start(dev);
	} else {
		dev->complete(dev, 0);
	}

out_rx:
	return err;
}
Code Example #9
static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 8 or 12 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
Code Example #10
File: dma-example.c  Project: 020gzh/linux
static int __init example_init(void)
{
	int			i;
	unsigned int		ret;
	unsigned int		nents;
	struct scatterlist	sg[10];

	printk(KERN_INFO "DMA fifo test start\n");

	if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
		printk(KERN_WARNING "error kfifo_alloc\n");
		return -ENOMEM;
	}

	printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));

	kfifo_in(&fifo, "test", 4);

	for (i = 0; i != 9; i++)
		kfifo_put(&fifo, i);

	/* kick away first byte */
	kfifo_skip(&fifo);

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	/*
	 * Configure the kfifo buffer to receive data from DMA input.
	 *
	 *  .--------------------------------------.
	 *  | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
	 *  |---|------------------|---------------|
	 *   \_/ \________________/ \_____________/
	 *    \          \                  \
	 *     \          \_allocated data   \
	 *      \_*free space*                \_*free space*
	 *
	 * We need two different SG entries: one for the free space area at the
	 * end of the kfifo buffer (19 bytes) and another for the first free
	 * byte at the beginning, after the kfifo_skip().
	 */
	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* fifo is full and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
		return -EIO;
	}

	/* receive data */
	printk(KERN_INFO "scatterlist for receive:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put here your code to set up and execute the dma operation */
	/* ... */

	/* example: zero bytes received */
	ret = 0;

	/* finish the dma operation and update the received data */
	kfifo_dma_in_finish(&fifo, ret);

	/* Prepare to transmit data, example: 8 bytes */
	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* no data was available and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
		return -EIO;
	}

	printk(KERN_INFO "scatterlist for transmit:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put here your code to set up and execute the dma operation */
	/* ... */

	/* example: 5 bytes transmitted */
	ret = 5;

	/* finish the dma operation and update the transmitted data */
	kfifo_dma_out_finish(&fifo, ret);

	ret = kfifo_len(&fifo);
	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	if (ret != 7) {
		printk(KERN_WARNING "size mismatch: test failed");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}
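
The sample above uses fifo and FIFO_SIZE without showing their declarations. The lines below are an assumed preamble in the style of the kernel's kfifo samples, added only so the listing reads as self-contained; the exact declaration in the original file may differ.

#include <linux/kfifo.h>

#define FIFO_SIZE	32

/* Type-safe byte fifo; its storage is allocated at runtime by kfifo_alloc(). */
static DECLARE_KFIFO_PTR(fifo, unsigned char);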
Code Example #11
/*
 * preps Ep pointers & data counters for next packet
 * (fragment of the request) xfer returns true if
 * there is a next transfer, and false if all bytes in
 * current request have been xfered
 */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{

	if (!end_points[ep].sg_list_enabled) {
		/*
		 * no further transfers for non storage EPs
		 * (like EP2 during firmware download, done
		 * in 64 byte chunks)
		 */
		if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
			DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
				__func__, end_points[ep].req_length, ep);

			/*
			 * no more transfers, we are done with the request
			 */
			return false;
		}

		/*
		 * calculate size of the next DMA xfer, corner
		 * case for non-storage EPs where transfer size
		 * is not equal to N * HAL_DMA_PKT_SZ
		 */
		if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
		>= HAL_DMA_PKT_SZ) {
				end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
		} else {
			/*
			 * that would be the last chunk less
			 * than P-port max size
			 */
			end_points[ep].dma_xfer_sz = end_points[ep].req_length -
					end_points[ep].req_xfer_cnt;
		}

		return true;
	}

	/*
	 * for SG_list assisted dma xfers
	 * are we done with current SG ?
	 */
	if (end_points[ep].seg_xfer_cnt ==  end_points[ep].sg_p->length) {
		/*
		 *  was it the Last SG segment on the list ?
		 */
		if (sg_is_last(end_points[ep].sg_p)) {
			DBGPRN("<1> %s: EP:%d completed,"
					"%d bytes xfered\n",
					__func__,
					ep,
					end_points[ep].req_xfer_cnt
			);

			return false;
		} else {
			/*
			 * There are more SG segments in current
			 * request's sg list setup new segment
			 */

			end_points[ep].seg_xfer_cnt = 0;
			end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
			/* set data pointer for next DMA sg transfer*/
			end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
			DBGPRN("<1> %s new SG:_va:%p\n\n",
					__func__, end_points[ep].data_p);
		}

	}

	/*
	 * for sg list xfers it will always be 512 or 1024
	 */
	end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;

	/*
	 * next transfer is required
	 */

	return true;
}
Code Example #12
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
Code Example #13
static int __init example_init(void)
{
	int			i;
	unsigned int		ret;
	struct scatterlist	sg[10];

	printk(KERN_INFO "DMA fifo test start\n");

	if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return 1;
	}

	printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));

	kfifo_in(&fifo, "test", 4);

	for (i = 0; i != 9; i++)
		kfifo_put(&fifo, &i);

	/* kick away first byte */
	ret = kfifo_get(&fifo, &i);

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
	printk(KERN_INFO "DMA sgl entries: %d\n", ret);

	/* if 0 was returned, fifo is full and no sgl was created */
	if (ret) {
		printk(KERN_INFO "scatterlist for receive:\n");
		for (i = 0; i < ARRAY_SIZE(sg); i++) {
			printk(KERN_INFO
			"sg[%d] -> "
			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
				i, sg[i].page_link, sg[i].offset, sg[i].length);

			if (sg_is_last(&sg[i]))
				break;
		}

		/* put here your code to set up and execute the dma operation */
		/* ... */

		/* example: zero bytes received */
		ret = 0;

		/* finish the dma operation and update the received data */
		kfifo_dma_in_finish(&fifo, ret);
	}

	ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
	printk(KERN_INFO "DMA sgl entries: %d\n", ret);

	/* if 0 was returned, no data was available and no sgl was created */
	if (ret) {
		printk(KERN_INFO "scatterlist for transmit:\n");
		for (i = 0; i < ARRAY_SIZE(sg); i++) {
			printk(KERN_INFO
			"sg[%d] -> "
			"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
				i, sg[i].page_link, sg[i].offset, sg[i].length);

			if (sg_is_last(&sg[i]))
				break;
		}

		/* put here your code to set up and execute the dma operation */
		/* ... */

		/* example: 5 bytes transmitted */
		ret = 5;

		/* finish the dma operation and update the transmitted data */
		kfifo_dma_out_finish(&fifo, ret);
	}

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	return 0;
}
Code Example #14
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
	kfree(new_key_mem);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
	struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
	int ret;

	ret = crypto_aead_setkey(child, key, key_len);
	if (!ret) {
		memcpy(ctx, c_ctx, sizeof(*ctx));
		ctx->cryptd_tfm = cryptd_tfm;
	}
	return ret;
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(aead)->authsize = authsize;
	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length and can
 * be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
	int ret;

	ret = crypto_aead_setauthsize(child, authsize);
	if (!ret)
		crypto_aead_crt(parent)->authsize = authsize;
	return ret;
}

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	u32 key_len = ctx->aes_key_expanded.key_length;
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
	        return -EINVAL;
	if (unlikely(key_len != AES_KEYSIZE_128 &&
	             key_len != AES_KEYSIZE_192 &&
	             key_len != AES_KEYSIZE_256))
	        return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
Code Example #15
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length and can
 * be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 16 or 20 bytes */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 16 or 20 bytes */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
Code Example #16
File: aes_engine.c  Project: andy-padavan/rt-n56u
int mtk_aes_process_sg(struct scatterlist* sg_src,
		struct scatterlist* sg_dst,
		struct mcrypto_ctx *ctx,
		unsigned int nbytes,
		unsigned int mode)
{
	struct scatterlist *next_dst, *next_src;
	struct AES_txdesc* txdesc;
	struct AES_rxdesc* rxdesc;
	u32 aes_txd_info4;
	u32 aes_size_total, aes_size_chunk, aes_free_desc;
	u32 aes_tx_scatter = 0;
	u32 aes_rx_gather = 0;
	u32 i = 1, j = 1;
	unsigned long flags = 0;

	next_src = sg_src;
	next_dst = sg_dst;

	while (sg_dma_len(next_src) == 0) {
		if (sg_is_last(next_src))
			return -EINVAL;
		next_src = sg_next(next_src);
	}

	while (sg_dma_len(next_dst) == 0) {
		if (sg_is_last(next_dst))
			return -EINVAL;
		next_dst = sg_next(next_dst);
	}

	if (ctx->keylen == AES_KEYSIZE_256)
		aes_txd_info4 = TX4_DMA_AES_256;
	else if (ctx->keylen == AES_KEYSIZE_192)
		aes_txd_info4 = TX4_DMA_AES_192;
	else
		aes_txd_info4 = TX4_DMA_AES_128;

	if (mode & MCRYPTO_MODE_ENC)
		aes_txd_info4 |= TX4_DMA_ENC;

	if (mode & MCRYPTO_MODE_CBC)
		aes_txd_info4 |= TX4_DMA_CBC | TX4_DMA_IVR;

	spin_lock_irqsave(&AES_Entry.page_lock, flags);

	DBGPRINT(DBG_HIGH, "\nStart new scatter, TX [front=%u rear=%u]; RX [front=%u rear=%u]\n",
			AES_Entry.aes_tx_front_idx, AES_Entry.aes_tx_rear_idx,
			AES_Entry.aes_rx_front_idx, AES_Entry.aes_rx_rear_idx);

	aes_size_total = nbytes;

	if (AES_Entry.aes_tx_front_idx > AES_Entry.aes_tx_rear_idx)
		aes_free_desc = NUM_AES_TX_DESC - (AES_Entry.aes_tx_front_idx - AES_Entry.aes_tx_rear_idx);
	else
		aes_free_desc = AES_Entry.aes_tx_rear_idx - AES_Entry.aes_tx_front_idx;

	/* TX descriptor */
	while (1) {
		if (i > aes_free_desc) {
			spin_unlock_irqrestore(&AES_Entry.page_lock, flags);
			return -EAGAIN;
		}
		
		aes_tx_scatter = (AES_Entry.aes_tx_rear_idx + i) % NUM_AES_TX_DESC;
		txdesc = &AES_Entry.AES_tx_ring0[aes_tx_scatter];
		
		if (sg_dma_len(next_src) == 0)
			goto next_desc_tx;
		
		aes_size_chunk = min(aes_size_total, sg_dma_len(next_src));
		
		DBGPRINT(DBG_HIGH, "AES set TX Desc[%u] Src=%08X, len=%d, Key=%08X, klen=%d\n",
			aes_tx_scatter, (u32)sg_virt(next_src), aes_size_chunk, (u32)ctx->key, ctx->keylen);
		
		if ((mode & MCRYPTO_MODE_CBC) && (i == 1)) {
			if (!ctx->iv)
				memset((void*)txdesc->IV, 0xFF, sizeof(uint32_t)*4);
			else
				memcpy((void*)txdesc->IV, ctx->iv, sizeof(uint32_t)*4);
			txdesc->txd_info4 = aes_txd_info4 | TX4_DMA_KIU;
		} else {
			txdesc->txd_info4 = aes_txd_info4;
		}
		
		if (i == 1) {
			txdesc->SDP0 = (u32)dma_map_single(NULL, ctx->key, ctx->keylen, DMA_TO_DEVICE);
			txdesc->txd_info2 = TX2_DMA_SDL0_SET(ctx->keylen);
		} else {
			txdesc->txd_info2 = 0;
		}
		
		txdesc->SDP1 = (u32)dma_map_single(NULL, sg_virt(next_src), aes_size_chunk, DMA_TO_DEVICE);
		txdesc->txd_info2 |= TX2_DMA_SDL1_SET(aes_size_chunk);
		
		i++;
		aes_size_total -= aes_size_chunk;
next_desc_tx:
		if (!aes_size_total || sg_is_last(next_src)) {
			txdesc->txd_info2 |= TX2_DMA_LS1;
			break;
		}
		
		next_src = sg_next(next_src);
	}

	aes_size_total = nbytes;

	if (AES_Entry.aes_rx_front_idx > AES_Entry.aes_rx_rear_idx)
		aes_free_desc = NUM_AES_RX_DESC - (AES_Entry.aes_rx_front_idx - AES_Entry.aes_rx_rear_idx);
	else
		aes_free_desc = AES_Entry.aes_rx_rear_idx - AES_Entry.aes_rx_front_idx;

	/* RX descriptor */
	while (1) {
		if (j > aes_free_desc) {
			spin_unlock_irqrestore(&AES_Entry.page_lock, flags);
			return -EAGAIN;
		}
		
		aes_rx_gather = (AES_Entry.aes_rx_rear_idx + j) % NUM_AES_RX_DESC;
		rxdesc = &AES_Entry.AES_rx_ring0[aes_rx_gather];
		
		if (sg_dma_len(next_dst) == 0)
			goto next_desc_rx;
		
		aes_size_chunk = min(aes_size_total, sg_dma_len(next_dst));
		
		DBGPRINT(DBG_HIGH, "AES set RX Desc[%u] Dst=%08X, len=%d\n",
			aes_rx_gather, (u32)sg_virt(next_dst), aes_size_chunk);
		
		rxdesc->SDP0 = dma_map_single(NULL, sg_virt(next_dst), aes_size_chunk, DMA_FROM_DEVICE);
		rxdesc->rxd_info2 = RX2_DMA_SDL0_SET(aes_size_chunk);
		
		j++;
		aes_size_total -= aes_size_chunk;
next_desc_rx:
		if (!aes_size_total || sg_is_last(next_dst)) {
			rxdesc->rxd_info2 |= RX2_DMA_LS0;
			break;
		}
		
		next_dst = sg_next(next_dst);
	}

	AES_Entry.aes_tx_rear_idx = aes_tx_scatter;
	AES_Entry.aes_rx_rear_idx = aes_rx_gather;

	DBGPRINT(DBG_MID, "TT [front=%u rear=%u]; RR [front=%u rear=%u]\n",
		AES_Entry.aes_tx_front_idx, AES_Entry.aes_tx_rear_idx,
		AES_Entry.aes_rx_front_idx, AES_Entry.aes_rx_rear_idx);

#if defined (CONFIG_CRYPTO_DEV_MTK_AES_INT)
	INIT_COMPLETION(AES_Entry.op_complete);
#endif

	wmb();

	aes_tx_scatter = (aes_tx_scatter + 1) % NUM_AES_TX_DESC;
	sysRegWrite(AES_TX_CTX_IDX0, cpu_to_le32(aes_tx_scatter));

	spin_unlock_irqrestore(&AES_Entry.page_lock, flags);

#if defined (CONFIG_CRYPTO_DEV_MTK_AES_INT)
	if (wait_for_completion_timeout(&AES_Entry.op_complete, msecs_to_jiffies(200)) == 0) {
		printk("\n%s: PDMA timeout!\n", AES_MODNAME);
		return -ETIMEDOUT;
	}
#endif

	return mtk_aes_poll_done();
}
Code Example #17
File: spi-bcm2835.c  Project: AlexShiLucky/linux
/**
 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
 * @master: SPI master
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @cs: CS register
 *
 * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
 * Only the final write access is permitted to transmit less than 4 bytes, the
 * SPI controller deduces its intended size from the DLEN register.
 *
 * If a TX or RX sglist contains multiple entries, one per page, and the first
 * entry starts in the middle of a page, that first entry's length may not be
 * a multiple of 4.  Subsequent entries are fine because they span an entire
 * page, hence do have a length that's a multiple of 4.
 *
 * This cannot happen with kmalloc'ed buffers (which is what most clients use)
 * because they are contiguous in physical memory and therefore not split on
 * page boundaries by spi_map_buf().  But it *can* happen with vmalloc'ed
 * buffers.
 *
 * The DMA engine is incapable of combining sglist entries into a continuous
 * stream of 4 byte chunks, it treats every entry separately:  A TX entry is
 * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
 * entry is rounded up by throwing away received bytes.
 *
 * Overcome this limitation by transferring the first few bytes without DMA:
 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
 * The residue of 1 byte in the RX FIFO is picked up by DMA.  Together with
 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
 *
 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
 * Caution, the additional 4 bytes spill over to the second TX sglist entry
 * if the length of the first is *exactly* 1.
 *
 * At most 6 bytes are written and at most 3 bytes read.  Do we know the
 * transfer has this many bytes?  Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
 *
 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
 * by the DMA engine.  Toggling the DMA Enable flag in the CS register switches
 * the width but also garbles the FIFO's contents.  The prologue must therefore
 * be transmitted in 32-bit width to ensure that the following DMA transfer can
 * pick up the residue in the RX FIFO in ungarbled form.
 */
static void bcm2835_spi_transfer_prologue(struct spi_master *master,
					  struct spi_transfer *tfr,
					  struct bcm2835_spi *bs,
					  u32 cs)
{
	int tx_remaining;

	bs->tfr		 = tfr;
	bs->tx_prologue  = 0;
	bs->rx_prologue  = 0;
	bs->tx_spillover = false;

	if (!sg_is_last(&tfr->tx_sg.sgl[0]))
		bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

	if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
		bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

		if (bs->rx_prologue > bs->tx_prologue) {
			if (sg_is_last(&tfr->tx_sg.sgl[0])) {
				bs->tx_prologue  = bs->rx_prologue;
			} else {
				bs->tx_prologue += 4;
				bs->tx_spillover =
					!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
			}
		}
	}

	/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
	if (!bs->tx_prologue)
		return;

	/* Write and read RX prologue.  Adjust first entry in RX sglist. */
	if (bs->rx_prologue) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);
		bcm2835_spi_reset_hw(master);

		dma_sync_single_for_device(master->dma_rx->device->dev,
					   sg_dma_address(&tfr->rx_sg.sgl[0]),
					   bs->rx_prologue, DMA_FROM_DEVICE);

		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0])     -= bs->rx_prologue;
	}

	/*
	 * Write remaining TX prologue.  Adjust first entry in TX sglist.
	 * Also adjust second entry if prologue spills over to it.
	 */
	tx_remaining = bs->tx_prologue - bs->rx_prologue;
	if (tx_remaining) {
		bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
						  | BCM2835_SPI_CS_DMAEN);
		bcm2835_wr_fifo_count(bs, tx_remaining);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
	}

	if (likely(!bs->tx_spillover)) {
		sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
		sg_dma_len(&tfr->tx_sg.sgl[0])     -= bs->tx_prologue;
	} else {
		sg_dma_len(&tfr->tx_sg.sgl[0])      = 0;
		sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
		sg_dma_len(&tfr->tx_sg.sgl[1])     -= 4;
	}
}
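
To make the prologue arithmetic from the comment block concrete, here is a small standalone illustration (plain user-space C, not kernel code) that reproduces the two worked examples above: a first TX entry of 23 bytes with a first RX entry of 42 bytes, and a TX entry of 1 byte with an RX entry of 3 bytes. It assumes neither first entry is also the last one.

#include <stdio.h>

/* Prologue sizing only: how many bytes the CPU feeds (TX) and drains (RX). */
static void prologue(unsigned int tx_len0, unsigned int rx_len0)
{
	unsigned int tx = tx_len0 & 3;	/* TX bytes written without DMA */
	unsigned int rx = rx_len0 & 3;	/* RX bytes read without DMA */

	if (rx > tx)
		tx += 4;		/* the extra word may spill into entry 1 */

	printf("tx_len0=%u rx_len0=%u -> write %u, read %u\n",
	       tx_len0, rx_len0, tx, rx);
}

int main(void)
{
	prologue(23, 42);	/* -> write 3, read 2 */
	prologue(1, 3);		/* -> write 5, read 3 */
	return 0;
}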
Code Example #18
int map_ablkcipher_request(struct device *dev, struct ablkcipher_request *req)
{
	struct ablkcipher_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
	unsigned int iv_size = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct sg_data_array sg_data;
	struct buff_mgr_handle *buff_mgr = crypto_drvdata->buff_mgr_handle;
	int dummy = 0;
	int rc = 0;

	areq_ctx->sec_dir = 0;
	areq_ctx->dma_buf_type = DX_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_sg = 0;

	/* Map IV buffer */
	if (likely(iv_size != 0) ) {
		dump_byte_array("iv", (uint8_t *)req->info, iv_size);
		areq_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)req->info,
				       iv_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					areq_ctx->gen_ctx.iv_dma_addr))) {
			DX_LOG_ERR("Mapping iv %u B at va=0x%08lX "
				   "for DMA failed\n",iv_size,
				    (unsigned long)req->info);
			return -ENOMEM;
		}
		DX_LOG_DEBUG("Mapped iv %u B at va=0x%08lX to dma=0x%08lX\n",
				iv_size, (unsigned long)req->info,
			     (unsigned long)areq_ctx->gen_ctx.iv_dma_addr);
	} else {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src sg */
	if ( sg_is_last(req->src) &&
	     (sg_page(req->src) == NULL) &&
	     sg_dma_address(req->src)) {
		/* The source is secure no mapping is needed */
		areq_ctx->sec_dir = DX_SRC_DMA_IS_SECURE;
		areq_ctx->in_nents = 1;
	} else {
		if ( unlikely( dx_map_sg( dev,req->src, req->nbytes,
					  DMA_BIDIRECTIONAL,
					  &areq_ctx->in_nents,
					  LLI_MAX_NUM_OF_DATA_ENTRIES,
					  &dummy))){
			rc = -ENOMEM;
			goto fail_unmap_iv;
		}

		if ( areq_ctx->in_nents > 1 ) {
			areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI;
		}
	}

	if ( unlikely(req->src == req->dst)) {
		if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
			DX_LOG_ERR("Secure key inplace operation "
				   "is not supported \n");
			/* both sides are secure */
			rc = -ENOMEM;
			goto fail_unmap_din;
		}
		/* Handle inplace operation */
		if ( unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) {
			areq_ctx->out_nents = 0;
			buffer_mgr_set_sg_entry(&sg_data,
						areq_ctx->in_nents,
						req->src,
						req->nbytes,
						true);
		}
	} else {
		if ( sg_is_last(req->dst) &&
		     (sg_page(req->dst) == NULL) &&
		     sg_dma_address(req->dst)) {
			if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
				DX_LOG_ERR("Secure key in both sides is"
					   "not supported \n");
				/* both sides are secure */
				rc = -ENOMEM;
				goto fail_unmap_din;
			}
			/* The dest is secure no mapping is needed */
			areq_ctx->sec_dir = DX_DST_DMA_IS_SECURE;
			areq_ctx->out_nents = 1;
		} else {
			/* Map the dst sg */
			if ( unlikely( dx_map_sg(dev,req->dst, req->nbytes,
						 DMA_BIDIRECTIONAL,
						 &areq_ctx->out_nents,
						 LLI_MAX_NUM_OF_DATA_ENTRIES,
						 &dummy))){
				rc = -ENOMEM;
				goto fail_unmap_din;
			}

			if ( areq_ctx->out_nents > 1 ) {
				areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI;
			}
		}
		if ( unlikely( (areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) ) {
			if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) {
				buffer_mgr_set_sg_entry(&sg_data,
							areq_ctx->in_nents,
							req->src,
							req->nbytes,
							true);
			}
			if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) {
				buffer_mgr_set_sg_entry(&sg_data,
							areq_ctx->out_nents,
							req->dst,
							req->nbytes,
							true);
			}
		} /*few entries */
	} /* !inplace */

	if (unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI)) {
#if (DX_DEV_SIGNATURE == DX_CC441P_SIG)
		if (areq_ctx->sec_dir) {
			/* one of the sides is secure, can't use MLLI*/
			rc = -EINVAL;
			goto fail_unmap_dout;
		}
#endif
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		if (unlikely(buffer_mgr_build_mlli(dev, &sg_data, mlli_params))) {
			rc = -ENOMEM;
			goto fail_unmap_dout;
		}
	} /*MLLI case*/

	DX_LOG_DEBUG(" buf type = %s \n",
		     dx_get_buff_type(areq_ctx->dma_buf_type));
	return 0;
fail_unmap_dout:
	if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) {
		dma_unmap_sg(dev, req->dst,
			     areq_ctx->out_nents, DMA_BIDIRECTIONAL);
	}
fail_unmap_din:
	if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) {
		dma_unmap_sg(dev, req->src,
			     areq_ctx->in_nents, DMA_BIDIRECTIONAL);
	}
fail_unmap_iv:
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 iv_size, DMA_TO_DEVICE);
	}
	return rc;
}
Code Example #19
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						   req->dst_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
						    req->dst_len,
						    DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}
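
The comment in the middle of qat_rsa_dec() describes the rule: map the source directly if it is a single entry of exactly the modulus size, otherwise copy it into a zero-padded contiguous buffer. Below is a hedged, generic sketch of that rule, assuming src_len <= key_sz; the function name, parameters and error handling are illustrative, not the QAT driver's API.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <crypto/scatterwalk.h>

/* Map src for the device, bouncing through a key_sz-sized buffer if needed. */
static int demo_map_input(struct device *dev, struct scatterlist *src,
			  unsigned int src_len, unsigned int key_sz,
			  dma_addr_t *dma, void **bounce)
{
	u8 *buf;

	*bounce = NULL;

	if (sg_is_last(src) && src_len == key_sz) {
		/* One flat entry of exactly the right size: map it as-is. */
		*dma = dma_map_single(dev, sg_virt(src), key_sz, DMA_TO_DEVICE);
		return dma_mapping_error(dev, *dma) ? -ENOMEM : 0;
	}

	/* Otherwise copy into a contiguous buffer, left-padded with zeroes. */
	buf = dma_alloc_coherent(dev, key_sz, dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, key_sz);
	scatterwalk_map_and_copy(buf + key_sz - src_len, src, 0, src_len, 0);
	*bounce = buf;
	return 0;
}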
Code Example #20
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 8 or 12 bytes */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet.
	 */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
Code Example #21
0
File: aesni-intel_glue.c  Project: EMFPGA/linux_media
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};

	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	    req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) &&
	    (!PageHighMem(sg_page(req->dst)) ||
	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  hash_subkey, assoc, assoclen,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
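The fast path in gcmaes_encrypt() above, and in gcmaes_decrypt() further below, relies on one predicate: the source and the destination each consist of a single scatterlist entry whose data is reachable through one scatterwalk_map() call. A minimal sketch of that check factored into a helper follows; the name sg_fits_in_one_map() is illustrative, not an existing kernel API.

/* Sketch only: true when @sg is a lone entry that is either in lowmem or
 * does not cross the boundary of its (highmem) page, i.e. it can be
 * reached through a single kmap/scatterwalk_map().
 */
static inline bool sg_fits_in_one_map(struct scatterlist *sg)
{
	return sg_is_last(sg) &&
	       (!PageHighMem(sg_page(sg)) ||
		sg->offset + sg->length <= PAGE_SIZE);
}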
Code Example #22
0
/*
 * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
 *			     data from the sgl
 *
 * This function works in the same way as the mpi_read_raw_data, but it
 * takes an sgl instead of void * buffer. i.e. it allocates
 * a new MPI and reads the content of the sgl to the MPI.
 *
 * @sgl:	scatterlist to read from
 * @len:	number of bytes to read
 *
 * Return:	Pointer to a new MPI or NULL on error
 */
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
{
	struct scatterlist *sg;
	int x, i, j, z, lzeros, ents;
	unsigned int nbits, nlimbs, nbytes;
	mpi_limb_t a;
	MPI val = NULL;

	lzeros = 0;
	ents = sg_nents(sgl);

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buff = sg_virt(sg);
		int len = sg->length;

		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		ents--;
		lzeros = 0;
	}

	sgl = sg;

	if (!ents)
		nbytes = 0;
	else
		nbytes = len - lzeros;

	nbits = nbytes * 8;
	if (nbits > MAX_EXTERN_MPI_BITS) {
		pr_info("MPI: mpi too large (%u bits)\n", nbits);
		return NULL;
	}

	if (nbytes > 0)
		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
	else
		nbits = 0;

	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
	val = mpi_alloc(nlimbs);
	if (!val)
		return NULL;

	val->nbits = nbits;
	val->sign = 0;
	val->nlimbs = nlimbs;

	if (nbytes == 0)
		return val;

	j = nlimbs - 1;
	a = 0;
	z = 0;
	x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
	x %= BYTES_PER_MPI_LIMB;

	for_each_sg(sgl, sg, ents, i) {
		const u8 *buffer = sg_virt(sg) + lzeros;
		int len = sg->length - lzeros;
		int buf_shift = x;

		if  (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
			len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);

		for (; x < len + buf_shift; x++) {
			a <<= 8;
			a |= *buffer++;
			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
				val->d[j--] = a;
				a = 0;
			}
		}
		z += x;
		x = 0;
		lzeros = 0;
	}
	return val;
}
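The kernel-doc above describes only the parameters, so here is a hedged usage sketch: wrapping a flat big-endian buffer in a one-entry scatterlist and converting it to an MPI. The helper name example_mpi_from_buf() is hypothetical and not part of the listed code; the caller would later release the result with mpi_free().

/* Hypothetical usage sketch (not from the listing above). */
static MPI example_mpi_from_buf(const void *buf, unsigned int len)
{
	struct scatterlist sg;

	/* One flat buffer becomes a single-entry scatterlist. */
	sg_init_one(&sg, buf, len);

	/* Returns NULL on allocation failure or oversized input. */
	return mpi_read_raw_from_sgl(&sg, len);
}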
Code Example #23
0
File: aesni-intel_glue.c  Project: EMFPGA/linux_media
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	int retval = 0;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	if (sg_is_last(req->src) &&
	    (!PageHighMem(sg_page(req->src)) ||
	    req->src->offset + req->src->length <= PAGE_SIZE) &&
	    sg_is_last(req->dst) &&
	    (!PageHighMem(sg_page(req->dst)) ||
	    req->dst->offset + req->dst->length <= PAGE_SIZE)) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}


	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  hash_subkey, assoc, assoclen,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;

}
Code Example #24
0
int AesProcessScatterGather(struct scatterlist *sg_src,
			    struct scatterlist *sg_dst,
			    uint32_t TransCount,
			    uint8_t *Key,
			    uint8_t *IV,
			    uint32_t aes_mode)
{
	int i = 1;
	int j = 1;
	int bUserData = 0;
	struct scatterlist *next_dst, *next_src;
	u8 *Src0, *Dst0, *Dst1;
	struct AES_txdesc *txdesc = NULL;
	struct AES_rxdesc *rxdesc = NULL;
	unsigned int aes_tx_scatter = 0;
	unsigned int aes_rx_gather = 0;
	unsigned int keylen = aes_key_len[aes_mode & 0x03];
	unsigned long flags = 0;
	int rx_remain_len = 0;
	int rx_desc_userdata = 0;

	next_src = sg_src;
	next_dst = sg_dst;
	
	if (aes_irq_enabled == 0)	
		spin_lock_irqsave(&(AES_Entry.page_lock), flags);
	//start_clk = read_c0_count();
	
	while (next_src->length == 0)
	{
		if (!sg_is_last(next_src))
			next_src = sg_next(next_src);
		else
		{
			printk("last src length=0\n");
			goto EXIT;
		}
	}
	while (next_dst->length == 0)
	{
		if (!sg_is_last(next_dst))
			next_dst = sg_next(next_dst);
		else
		{
			printk("last dst length=0\n");
			goto EXIT;
		}
	}
	
	Src0 = sg_virt(next_src);
	Dst0 = sg_virt(next_dst);		

	//RX descriptor
	while(1)
	{
		aes_rx_gather  = (aes_rx_rear_idx + j) % NUM_AES_RX_DESC;

		if (bUserData == 0)
		{	
			rxdesc = &AES_Entry.AES_rx_ring0[aes_rx_gather];
			DBGPRINT(DBG_HIGH, "AES set RX Desc[%u] Dst0=%08X, len=%d\n",(u32)aes_rx_gather, (u32)Dst0, next_dst->length);
			
			if (((u32)Dst0 & 0x3) || (next_dst->length & 0x3))
			{
				bUserData = 1;
				rx_desc_userdata = aes_rx_gather;
				AES_userdata[aes_rx_gather].orig_SDP0 = (u32)Dst0;
				AES_userdata[aes_rx_gather].orig_SDL = next_dst->length;
				rx_remain_len += next_dst->length;
			}
			else
				AES_userdata[aes_rx_gather].new_SDP0 = 0;
	
			if (bUserData == 0)
			{	
				rxdesc->SDP0 = dma_map_single(NULL, Dst0, next_dst->length, PCI_DMA_FROMDEVICE);
				rxdesc->aes_rxd_info2.SDL0 = next_dst->length;
				rxdesc->aes_rxd_info2.DDONE = 0;
	
	 		}
		}
		else
		{	
			AES_userdata[aes_rx_gather].orig_SDP0 = (u32)Dst0;
			AES_userdata[aes_rx_gather].orig_SDL = next_dst->length;
			rx_remain_len += next_dst->length;
		}		
		if (sg_is_last(next_dst))
		{ 
			if (bUserData == 0)
				rxdesc->aes_rxd_info2.LS0 = 1;
			break;
		}
		else
		{
			if (bUserData == 0)
				rxdesc->aes_rxd_info2.LS0 = 0;//1;			
		}
		
		next_dst = sg_next(next_dst);
		Dst0 = sg_virt(next_dst);
		j++;
	}	  
  	
  	if (bUserData == 0)
		aes_rx_rear_idx = aes_rx_gather;
	else
	{
		int new_SDL = rx_remain_len;
		int rx_desc_start = rx_desc_userdata;
		int remain_SDL = new_SDL;

		while (remain_SDL > 0)
		{
			int SDL = (remain_SDL > (16384 - 4)) ? 16380 : remain_SDL;

			Dst1 = kmalloc(SDL, GFP_DMA|GFP_ATOMIC);
			DBGPRINT(DBG_HIGH, "RxDesc[%u] realloc len %d (%08X)\n", (u32)rx_desc_start, (u32)SDL, (u32)Dst1);
			if (Dst1)
			{	
				rxdesc = &AES_Entry.AES_rx_ring0[rx_desc_start];

				rxdesc->SDP0 = dma_map_single(NULL, Dst1, SDL, PCI_DMA_FROMDEVICE);
				rxdesc->aes_rxd_info2.SDL0 = SDL;
				rxdesc->aes_rxd_info2.DDONE = 0;
				rxdesc->aes_rxd_info2.LS0 = 0;
	        	
				AES_userdata[rx_desc_start].new_SDP0 = (u32)Dst1;
				AES_userdata[rx_desc_start].new_SDL = SDL;
				aes_rx_rear_idx = rx_desc_start;				

				remain_SDL-=SDL;
				rx_desc_start = (rx_desc_start + 1) % NUM_AES_RX_DESC;
			}
			else
			{
				printk("Can't alloc AES Engine bounce buffer\n");
				AES_userdata[rx_desc_start].new_SDP0 = 0;
				//need free previous kmalloc buffer
				break;
			}
		}
		rxdesc->aes_rxd_info2.LS0 = 1;
	}			
  	
	while(1)
	{	
		aes_tx_scatter = (aes_tx_rear_idx + i) % NUM_AES_TX_DESC;
		DBGPRINT(DBG_HIGH, "AES set TX Desc[%u] Src0=%08X len=%d klen=%d\n",aes_tx_scatter, (u32)Src0, next_src->length,keylen);	
		txdesc = &AES_Entry.AES_tx_ring0[aes_tx_scatter];
		
		txdesc->aes_txd_info4.value = aes_mode;

		{	
			if (aes_mode&CBC_MODE)
			{	
				txdesc->aes_txd_info4.value |= VALID_IV;
				txdesc->aes_txd_info4.value |= RESTORE_IV;
				if (i > 1)
					txdesc->aes_txd_info4.value &= ~VALID_IV;
					
				//first tx, set Key and IV
				if (txdesc->aes_txd_info4.value & VALID_IV)
				{	
					if (IV == NULL)
						memset((void*)txdesc->IV, 0xFF, sizeof(uint32_t)*4);
					else
						memcpy((void*)txdesc->IV, IV, sizeof(uint32_t)*4);
				}	
			}
			
			if (i==1)
			{	
				txdesc->SDP0 = ((uint32_t)Key & 0x1FFFFFFF);
				if (bUserData)
				{	
					txdesc->aes_txd_info4.value |= CARRY_USERDATA;
					*(uint32_t*)&Key[keylen] = 	(uint32_t)AES_userdata;
					txdesc->aes_txd_info2.SDL0 = ((keylen>>2)+1)*sizeof(uint32_t);	/* KEY + USER_DATA */
				}
				else
					txdesc->aes_txd_info2.SDL0 = ((keylen>>2))*sizeof(uint32_t);	/* KEY */
			}
			else
Code Example #25
0
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided use g as base
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
								 ctx->p_size,
								 &qat_req->in.dh.in.b,
								 GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo m so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}
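Both QAT listings (the RSA tail at the top of this section and qat_dh_compute_value()) repeat the same source-operand pattern: a single scatterlist entry of exactly the operand size is DMA-mapped directly, and anything else is copied right-aligned into a zeroed coherent bounce buffer. The condensed sketch below assumes the same era of DMA API the listings use (dma_zalloc_coherent); the helper name qat_map_operand() is illustrative only, not part of the driver.

/* Sketch only: map @sg for the device when it is one entry of exactly
 * @op_sz bytes; otherwise bounce it into a zeroed coherent buffer so the
 * value lands right-aligned to the operand size the hardware expects.
 */
static int qat_map_operand(struct device *dev, struct scatterlist *sg,
			   unsigned int len, unsigned int op_sz,
			   dma_addr_t *dma, void **bounce)
{
	*bounce = NULL;
	if (sg_is_last(sg) && len == op_sz) {
		*dma = dma_map_single(dev, sg_virt(sg), len, DMA_TO_DEVICE);
		return dma_mapping_error(dev, *dma) ? -ENOMEM : 0;
	}

	*bounce = dma_zalloc_coherent(dev, op_sz, dma, GFP_KERNEL);
	if (!*bounce)
		return -ENOMEM;

	scatterwalk_map_and_copy((u8 *)*bounce + (op_sz - len), sg, 0, len, 0);
	return 0;
}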