static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist    *dst,
			    struct scatterlist    *src,
			    unsigned int           nbytes,
			    int                    enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	do {
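		/*
		 * to_process: the data chunk to process in this iteration,
		 * bounded by the coprocessor's per-operation data limit and
		 * the sg list size, then rounded down to a whole number of
		 * AES blocks.
		 */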
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				processed, NULL);
		if (rc)
			goto out;

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #2
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist    *dst,
			    struct scatterlist    *src,
			    unsigned int           nbytes,
			    int                    enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int rc;

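	/* the whole request must fit in a single coprocessor operation */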
	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
	if (rc)
		goto out;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	return rc;
}
Example #3
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = total;
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

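		/* in_sg has advanced past nx_ctx->in_sg, so inlen ends up
		 * negative, indicating to phyp that it refers to an sg list */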
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = (data_len + buf_len);
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
			       csbcpb->cpb.sha256.message_digest,
			       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #4
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

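	/* the partial block still buffered in the state is the final input */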
	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #5
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 count0;
	unsigned long irq_flags;
	int rc;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
							SHA512_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

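	/* message length in bits goes into the low word of the length field */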
	count0 = sctx->count[0] * 8;

	csbcpb->cpb.sha512.message_bit_length_lo = count0;

	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
				  &nx_ctx->op.inlen,
				  &len,
				  (u8 *)sctx->buf,
				  NX_DS_SHA512);

	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
		if (!rc)
			rc = -EINVAL;
		goto out;
	}

	len = SHA512_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
				  &nx_ctx->op.outlen,
				  &len,
				  out,
				  NX_DS_SHA512);

	if (rc)
		goto out;

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #6
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	int rc = -EINVAL;

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			rc = -ENOMEM;
			goto out;
		}

		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

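	/* on decrypt, req->cryptlen includes the auth tag, so trim it off */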
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	return rc;
}
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

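	/* generate B0 and fold any associated data into the initial PAT */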
	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
					&to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy the following state over for
		 * the next pass through the loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}


			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
				nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
				AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
					&(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
Example #9
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
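		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */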
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;


		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
Example #10
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so it's
	 * safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
Example #11
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

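		/* GMAC authenticates AAD only, so no data bits are counted */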
		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
Example #12
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

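	/* associated data that fits in one AES block is copied straight into
	 * the output; no coprocessor call is needed */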
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}