Example #1
static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* 1. Decrypt Cn-1 (s) to create Dn (tmp) */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;
	/* 2. Pad Cn with zeros at the end to create C of length BB */
	memset(iv, 0, sizeof(iv));
	memcpy(iv, s + bsize, lastn);
	/* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
	crypto_xor(tmp, iv, bsize);
	/* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
	memcpy(d + bsize, tmp, lastn);

	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
	/* 6. Decrypt En to create Pn-1 */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], d, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	/* XOR with previous block */
	crypto_xor(d, desc->info, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	memcpy(desc->info, s, bsize);
	return err;
}
Example #2
static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize], tmp2[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	memset(s, 0, sizeof(s));
	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	memcpy(iv, desc->info, bsize);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

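	/* encrypt the second-to-last plaintext block with the chaining IV */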
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;

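	/* the first Ln bytes of that ciphertext become the final, truncated block */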
	memcpy(d + bsize, tmp, lastn);

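	/* encrypt the zero-padded last block, chained to the ciphertext just produced */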
	lcldesc.info = tmp;

	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], tmp2, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	memcpy(d, tmp2, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

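	/* the last full ciphertext block becomes the next chaining IV */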
	memcpy(desc->info, tmp2, bsize);

	return err;
}
Example #3
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

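	/* append a 32-bit counter of 1 to the IV (J0) and encrypt it into the tag buffer */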
	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
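	/* nothing to hash or encrypt: the tag is just the encrypted J0 block */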
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
Example #4
static int seqiv_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = 8;

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

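	/* extract the 8-byte explicit IV that follows the AAD in the source buffer */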
	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
Example #5
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
Example #6
static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
				struct scatterlist *new_sg, u16 flags)
{
	void *buf;
	int pages;
	int new_len;

	new_len = ALIGN(total, bs);
	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
		       __func__);
		return -ENOMEM;
	}

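	/* copy the existing data into the new linear buffer and zero any alignment padding */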
	if (flags & OMAP_CRYPTO_COPY_DATA) {
		scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
		if (flags & OMAP_CRYPTO_ZERO_BUF)
			memset(buf + total, 0, new_len - total);
	}

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
		sg_init_table(new_sg, 1);

	sg_set_buf(new_sg, buf, new_len);

	*sg = new_sg;

	return 0;
}
Example #7
static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	struct scatterlist *sg;

	if (areq->assoclen != 16 && areq->assoclen != 20)
		return -EINVAL;

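	/* linearize the AAD (without the explicit IV) into a local buffer */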
	scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, rctx->assoc, assoclen);
	sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (areq->src != areq->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, rctx->assoc, assoclen);
		sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_rctx->src = rctx->src;
	aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;

	return 0;
}
Example #8
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

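	/* pull the received auth tag off the end of the ciphertext */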
	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

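	/* rfc 3610: the auth tag is decrypted with the counter field set to zero */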
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, authtag, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, authtag, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	err = crypto_ccm_auth(req, req->dst, cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
Example #9
static void gcm_enc_copy_hash(struct aead_request *req,
                              struct crypto_gcm_req_priv_ctx *pctx)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    u8 *auth_tag = pctx->auth_tag;

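    /* append the computed tag right after the ciphertext in dst */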
    scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
                             crypto_aead_authsize(aead), 1);
}
Example #10
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
Example #11
static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[block_size];
	int pad_size = block_size - (len % block_size);

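	/* TLS-style CBC padding: pad_size bytes, each holding pad_size - 1 */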
	memset(pad, pad_size-1, pad_size);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
Example #12
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 int enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *dst = req->dst;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *dstp;
	u8 *vdst;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

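	/* IV = 4-byte salt from setkey followed by the 8-byte explicit IV */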
	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, dst,
					 req->cryptlen - authsize,
					 authsize, 0);

	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	sg_init_table(assoc, 2);
	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
		    req->assoc->offset);
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
Example #13
static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	u8 *odata = pctx->odata;

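	/* on success, append the encrypted CBC-MAC (the auth tag) to dst */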
	if (!err)
		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
					 crypto_aead_authsize(aead), 1);
	aead_request_complete(req, err);
}
Example #14
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	err = crypto_ccm_auth(req, req->src, cryptlen);
	if (err)
		return err;

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, odata, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, odata, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
				 crypto_aead_authsize(aead), 1);
	return err;
}
Example #15
static int crypto_gcm_verify(struct aead_request *req,
                             struct crypto_gcm_req_priv_ctx *pctx)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    u8 *auth_tag = pctx->auth_tag;
    u8 *iauth_tag = pctx->iauth_tag;
    unsigned int authsize = crypto_aead_authsize(aead);
    unsigned int cryptlen = req->cryptlen - authsize;

    crypto_xor(auth_tag, iauth_tag, 16);
    scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
    return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
Example #16
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
    unsigned int authsize = crypto_aead_authsize(aead);
    unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize);
    struct blkcipher_desc desc = {
        .tfm = ctx->null,
    };

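    /* the null cipher simply copies nbytes from src to dst */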
    return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes);
}

static int crypto_rfc4543_encrypt(struct aead_request *req)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
    struct aead_request *subreq;
    int err;

    if (req->src != req->dst) {
        err = crypto_rfc4543_copy_src_to_dst(req, true);
        if (err)
            return err;
    }

    subreq = crypto_rfc4543_crypt(req, true);
    err = crypto_aead_encrypt(subreq);
    if (err)
        return err;

    scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
                             crypto_aead_authsize(aead), 1);

    return 0;
}

static int crypto_rfc4543_decrypt(struct aead_request *req)
{
    int err;

    if (req->src != req->dst) {
        err = crypto_rfc4543_copy_src_to_dst(req, false);
        if (err)
            return err;
    }

    req = crypto_rfc4543_crypt(req, false);

    return crypto_aead_decrypt(req);
}
Example #17
static void crypto_rfc4543_done(struct crypto_async_request *areq, int err)
{
    struct aead_request *req = areq->data;
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);

    if (!err) {
        scatterwalk_map_and_copy(rctx->auth_tag, req->dst,
                                 req->cryptlen,
                                 crypto_aead_authsize(aead), 1);
    }

    aead_request_complete(req, err);
}
Example #18
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[256]; /* the maximum allowed */
	uint8_t pad_size;
	int i;

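	/* the last byte of the record holds the padding length */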
	scatterwalk_map_and_copy(&pad_size, dst_sg, len-1, 1, 0);

	if (pad_size+1 > len) {
		dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
		return -EBADMSG;
	}

	scatterwalk_map_and_copy(pad, dst_sg, len-pad_size-1, pad_size+1, 0);

	for (i=0;i<pad_size;i++)
		if (pad[i] != pad_size) {
			dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
			return -EBADMSG;
		}

	return pad_size+1;
}
Example #19
static int crypto_gcm_hash(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	u8 *auth_tag = pctx->auth_tag;
	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;

	crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
	crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
				   auth_tag);

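	/* append the tag right after the ciphertext in dst */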
	scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);
	return 0;
}
Example #20
static int crypto_gcm_verify(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
	u8 *auth_tag = pctx->auth_tag;
	u8 *iauth_tag = pctx->iauth_tag;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen - authsize;

	crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);

	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
Example #21
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

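	/* on encryption, append the computed tag after the AAD and ciphertext */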
	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
Example #22
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq;
	int err;

	subreq = crypto_rfc4543_crypt(req, 1);
	err = crypto_aead_encrypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
Example #23
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	cryptlen -= authsize;

	err = crypto_ccm_init_crypt(req, authtag);
	if (err)
		return err;

	scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
				 authsize, 0);

	dst = pctx->src;
	if (req->src != req->dst)
		dst = pctx->dst;

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
Example #24
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	struct scatterlist *sg;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;

	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

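	/* copy the AAD (minus the 8-byte explicit IV) into the buffer just past the IV */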
	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);

	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (req->src != req->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, rctx->src,
			       req->src == req->dst ? rctx->src : rctx->dst,
			       req->cryptlen, iv);
	aead_request_set_ad(subreq, req->assoclen - 8);

	return subreq;
}
Example #25
static int crypto_aegis256_aesni_encrypt(struct aead_request *req)
{
	static const struct aegis_crypt_ops OPS = {
		.skcipher_walk_init = skcipher_walk_aead_encrypt,
		.crypt_blocks = crypto_aegis256_aesni_enc,
		.crypt_tail = crypto_aegis256_aesni_enc_tail,
	};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_block tag = {};
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen;

	crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS);

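	/* write the computed tag after the AAD and ciphertext */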
	scatterwalk_map_and_copy(tag.bytes, req->dst,
				 req->assoclen + cryptlen, authsize, 1);
	return 0;
}
Example #26
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

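	/* the result is p_size bytes; copy it out of the bounce buffer if one was used */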
	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}
Example #27
static int crypto_aegis256_aesni_decrypt(struct aead_request *req)
{
	static const struct aegis_block zeros = {};

	static const struct aegis_crypt_ops OPS = {
		.skcipher_walk_init = skcipher_walk_aead_decrypt,
		.crypt_blocks = crypto_aegis256_aesni_dec,
		.crypt_tail = crypto_aegis256_aesni_dec_tail,
	};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_block tag;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen - authsize;

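	/* load the received tag; the final pass XORs in the computed tag, so a match leaves all zeroes */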
	scatterwalk_map_and_copy(tag.bytes, req->src,
				 req->assoclen + cryptlen, authsize, 0);

	crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS);

	return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
}
Example #28
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

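	/* on decryption, read back the tag that arrived with the ciphertext */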
	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
Example #29
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_init_crypt(req, odata);
	if (err)
		return err;

	err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
	if (err)
		return err;

	dst = pctx->src;
	if (req->src != req->dst)
		dst = pctx->dst;

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
				 crypto_aead_authsize(aead), 1);
	return err;
}
Example #30
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
			 int offset, int len, u8 flags_shift,
			 unsigned long flags)
{
	void *buf;
	int pages;

	flags >>= flags_shift;
	flags &= OMAP_CRYPTO_COPY_MASK;

	if (!flags)
		return;

	buf = sg_virt(sg);
	pages = get_order(len);

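	/* if the data was bounced through a temporary buffer, copy the result back to the original sg */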
	if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
		scatterwalk_map_and_copy(buf, orig, offset, len, 1);

	if (flags & OMAP_CRYPTO_DATA_COPIED)
		free_pages((unsigned long)buf, pages);
	else if (flags & OMAP_CRYPTO_SG_COPIED)
		kfree(sg);
}