Example #1
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist    *dst,
			    struct scatterlist    *src,
			    unsigned int           nbytes,
			    int                    enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	do {
		/* Bound each pass by the hardware's per-operation byte limit
		 * and by what one SG list can map, then round down to a whole
		 * number of AES blocks. */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				processed, NULL);
		if (rc)
			goto out;

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
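
For context, a minimal sketch of how a helper like this is usually exposed through the legacy blkcipher API: thin .encrypt/.decrypt wrappers that only differ in the direction flag. The wrapper names are assumptions, not part of the example above.

/* Illustrative wrappers (names assumed): the blkcipher .encrypt/.decrypt
 * hooks differ only in the direction flag passed to ecb_aes_nx_crypt().
 */
static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist    *dst,
			      struct scatterlist    *src,
			      unsigned int           nbytes)
{
	return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
}

static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist    *dst,
			      struct scatterlist    *src,
			      unsigned int           nbytes)
{
	return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
}
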
Example #2
static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist    *dst,
			    struct scatterlist    *src,
			    unsigned int           nbytes)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	do {
		/* Ask for everything that is left; nx_build_sg_lists() trims
		 * to_process down to what the SG lists can actually map. */
		to_process = nbytes - processed;

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
				       processed, csbcpb->cpb.aes_ctr.iv);
		if (rc)
			goto out;

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
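
CTR mode only XORs a keystream into the data, so encryption and decryption are the same operation. A plausible pair of entry points (names assumed) would both delegate to ctr_aes_nx_crypt():

/* Illustrative entry points (names assumed): CTR is symmetric, so both
 * directions run the same keystream generation in ctr_aes_nx_crypt().
 */
static int ctr_aes_nx_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist    *dst,
			      struct scatterlist    *src,
			      unsigned int           nbytes)
{
	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}

static int ctr_aes_nx_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist    *dst,
			      struct scatterlist    *src,
			      unsigned int           nbytes)
{
	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
}
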
Example #3
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist    *dst,
			    struct scatterlist    *src,
			    unsigned int           nbytes,
			    int                    enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
	if (rc)
		goto out;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	return rc;
}
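
A hedged sketch of how such a blkcipher is typically registered with the crypto core. The field values and the set-key/wrapper helper names are assumptions following the legacy struct crypto_alg pattern, not code taken from the example above.

/* Illustrative registration (helper names assumed), following the legacy
 * struct crypto_alg / blkcipher pattern a function like the one above
 * plugs into.
 */
struct crypto_alg nx_ecb_aes_alg = {
	.cra_name        = "ecb(aes)",
	.cra_driver_name = "ecb-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize   = AES_BLOCK_SIZE,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_blkcipher_type,
	.cra_module      = THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey      = ecb_aes_nx_set_key,	/* assumed helper */
		.encrypt     = ecb_aes_nx_encrypt,	/* assumed wrapper */
		.decrypt     = ecb_aes_nx_decrypt,	/* assumed wrapper */
	},
};
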
Example #4
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	int rc = -EINVAL;

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			rc = -ENOMEM;
			goto out;
		}

		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	return rc;
}
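
For completeness, a hedged sketch of the AEAD entry points a routine like this would sit behind: they stage the caller's 96-bit GCM IV in the driver context that desc.info later points at, then pick the direction. The wrapper names are assumed.

/* Illustrative entry points (names assumed): stage the 12-byte GCM IV in
 * the per-tfm buffer that desc.info is set to, then select the direction.
 */
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);	/* 96-bit IV; counter appended later */

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	char *iv = nx_ctx->priv.gcm.iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}
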
Example #5
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the chunk of data, in AES_BLOCK_SIZE units, to
		 * handle in this update; the value is bounded by the SG list
		 * limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
					&to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
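
A minimal sketch (name assumed) of the AEAD .encrypt hook that would set up the blkcipher_desc this helper expects, pointing desc.info at the caller-supplied nonce/counter IV before delegating:

/* Illustrative entry point (name assumed): build the blkcipher_desc the
 * helper above consumes, with desc.info holding the CCM nonce/counter IV.
 */
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;

	desc.info = req->iv;
	desc.tfm  = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}
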
Example #6
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
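
As with the earlier GCM example, a hedged sketch of the entry points this newer variant would sit behind: they copy the 96-bit IV into the per-request context (rctx->iv, which desc.info points at) and forward req->assoclen. The names are assumed.

/* Illustrative entry points (names assumed): stage the 12-byte IV in the
 * per-request context, then pass the direction and the association length.
 */
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}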