Example #1
static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
				  const u8 *src, unsigned int bytes)
{
	u8 buf[64];

	if (dst != src)
		memcpy(dst, src, bytes);

	while (bytes) {
		salsa20_wordtobyte(buf, ctx->input);

		ctx->input[8] = PLUSONE(ctx->input[8]);
		if (!ctx->input[8])
			ctx->input[9] = PLUSONE(ctx->input[9]);

		if (bytes <= 64) {
			crypto_xor(dst, buf, bytes);
			return;
		}

		crypto_xor(dst, buf, 64);
		bytes -= 64;
		dst += 64;
	}
}
Example #2
static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
				       struct blkcipher_walk *walk,
				       struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;
	u8 tmpbuf[bsize];

	do {
		memcpy(tmpbuf, src, bsize);
		crypto_xor(iv, src, bsize);
		fn(crypto_cipher_tfm(tfm), src, iv);
		memcpy(iv, tmpbuf, bsize);
		crypto_xor(iv, src, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
Example #3
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[bsize];

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

	for (;;) {
		fn(crypto_cipher_tfm(tfm), src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}
Example #4
static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
				       struct blkcipher_walk *walk,
				       struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_decrypt;
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		fn(crypto_cipher_tfm(tfm), dst, src);
		crypto_xor(dst, iv, bsize);
		memcpy(iv, src, bsize);
		crypto_xor(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
Example #5
static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;
	/* 2. Pad Cn with zeros at the end to create C of length BB */
	memset(iv, 0, sizeof(iv));
	memcpy(iv, s + bsize, lastn);
	/* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
	crypto_xor(tmp, iv, bsize);
	/* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
	memcpy(d + bsize, tmp, lastn);

	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
	/* 6. Decrypt En to create Pn-1 */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], d, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	/* XOR with previous block */
	crypto_xor(d, desc->info, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	memcpy(desc->info, s, bsize);
	return err;
}
Example #6
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
Example #7
static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
				       unsigned int authlen,
				       unsigned int cryptlen, u8 *dst)
{
	u8 *buf = ctx->buffer;
	u128 lengths;

	/* Length block: bit lengths of the authenticated and encrypted data */
	lengths.a = cpu_to_be64(authlen * 8);
	lengths.b = cpu_to_be64(cryptlen * 8);

	crypto_gcm_ghash_flush(ctx);
	/* Fold the length block into the hash, do the final GF(2^128)
	 * multiplication, then XOR the result into the tag buffer. */
	crypto_xor(buf, (u8 *)&lengths, 16);
	gf128mul_4k_lle((be128 *)buf, ctx->gf128);
	crypto_xor(dst, buf, 16);
}
Example #8
static int crypto_gcm_encrypt(struct aead_request *req)
{
    struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
    struct ablkcipher_request *abreq = &pctx->u.abreq;
    struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
    int err;

    crypto_gcm_init_crypt(abreq, req, req->cryptlen);
    ablkcipher_request_set_callback(abreq, aead_request_flags(req),
                                    gcm_encrypt_done, req);

    gctx->src = req->dst;
    gctx->cryptlen = req->cryptlen;
    gctx->complete = gcm_enc_hash_done;

    err = crypto_ablkcipher_encrypt(abreq);
    if (err)
        return err;

    err = gcm_hash(req, pctx);
    if (err)
        return err;

    crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
    gcm_enc_copy_hash(req, pctx);

    return 0;
}
Example #9
static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
				    const u8 *src, unsigned int srclen)
{
	u8 *dst = ctx->buffer;

	if (ctx->bytes) {
		int n = min(srclen, ctx->bytes);
		u8 *pos = dst + (16 - ctx->bytes);

		ctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		if (!ctx->bytes)
			gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	while (srclen >= 16) {
		crypto_xor(dst, src, 16);
		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
		src += 16;
		srclen -= 16;
	}

	if (srclen) {
		ctx->bytes = 16 - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}
}
Example #10
File: ctr.c Project: 03199618/linux
static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
Example #11
File: ctr.c Project: 03199618/linux
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
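Both CTR walkers above advance the counter with crypto_inc(), which treats the counter block as a big-endian integer of the cipher's block size. As a reading aid, here is a minimal, unoptimized sketch of that byte-wise behaviour; ctr_inc_sketch is an illustrative name, not the in-tree implementation, and it assumes the kernel's u8 type.

/* Sketch only: increment a 'size'-byte big-endian counter, propagating
 * the carry from the last byte toward the first. */
static void ctr_inc_sketch(u8 *counter, unsigned int size)
{
	for (; size; size--) {
		if (++counter[size - 1] != 0)
			break;		/* no carry left, done */
	}
}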
Example #12
static void __gcm_hash_final_done(struct aead_request *req, int err)
{
    struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
    struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;

    if (!err)
        crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);

    gctx->complete(req, err);
}
Example #13
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
	} sdesc;
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	sdesc.desc.tfm = tcw->crc32_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(&sdesc.desc);
		if (r)
			goto out;
		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
Example #14
static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *src = walk.src.virt.addr;

		if (walk.dst.virt.addr == walk.src.virt.addr) {
			u8 *iv = walk.iv;

			do {
				crypto_xor(src, iv, AES_BLOCK_SIZE);
				AES_encrypt(src, src, &ctx->enc);
				iv = src;
				src += AES_BLOCK_SIZE;
			} while (--blocks);
			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		} else {
			u8 *dst = walk.dst.virt.addr;

			do {
				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
				AES_encrypt(walk.iv, dst, &ctx->enc);
				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
				src += AES_BLOCK_SIZE;
				dst += AES_BLOCK_SIZE;
			} while (--blocks);
		}
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #15
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
			unsigned int ivsize)
{
	unsigned int len = ivsize;

	/* Write the sequence number big-endian into the low-order IV bytes,
	 * zero-padding anything above it. */
	if (ivsize > sizeof(u64)) {
		memset(info, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(seq);
	memcpy(info + ivsize - len, &seq, len);
	/* IV = salt XOR encoded sequence number */
	crypto_xor(info, ctx->salt, ivsize);
}
Example #16
static int crypto_gcm_verify(struct aead_request *req,
                             struct crypto_gcm_req_priv_ctx *pctx)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    u8 *auth_tag = pctx->auth_tag;
    u8 *iauth_tag = pctx->iauth_tag;
    unsigned int authsize = crypto_aead_authsize(aead);
    unsigned int cryptlen = req->cryptlen - authsize;

    /* Computed tag: E_K(counter 0) XOR final GHASH value */
    crypto_xor(auth_tag, iauth_tag, 16);
    /* Read the received tag from the end of the source data */
    scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
    /* Compare the tags in constant time */
    return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
Example #17
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
		       struct crypto_ccm_req_priv_ctx *pctx)
{
	unsigned int bs = 16;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int datalen, getlen;

	datalen = n;

	/* first time in here, block may be partially filled. */
	getlen = bs - pctx->ilen;
	if (datalen >= getlen) {
		memcpy(idata + pctx->ilen, data, getlen);
		crypto_xor(odata, idata, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		datalen -= getlen;
		data += getlen;
		pctx->ilen = 0;
	}

	/* now encrypt rest of data */
	while (datalen >= bs) {
		crypto_xor(odata, data, bs);
		crypto_cipher_encrypt_one(tfm, odata, odata);

		datalen -= bs;
		data += bs;
	}

	/* check and see if there's leftover data that wasn't
	 * enough to fill a block.
	 */
	if (datalen) {
		memcpy(idata + pctx->ilen, data, datalen);
		pctx->ilen += datalen;
	}
}
Example #18
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
Example #19
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}
Example #20
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}
Example #21
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[DES3_EDE_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	des3_ede_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}
Example #22
static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	u32 blocks;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		__be32 *ctr = (__be32 *)walk.iv;
		u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);

		/* avoid 32 bit counter overflow in the NEON code */
		if (unlikely(headroom < blocks)) {
			blocks = headroom + 1;
			tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
		}
		kernel_neon_begin();
		bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
					   walk.dst.virt.addr, blocks,
					   &ctx->enc, walk.iv);
		kernel_neon_end();
		inc_be128_ctr(ctr, blocks);

		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
			break;

		err = blkcipher_walk_done(desc, &walk, tail);
	}
	if (walk.nbytes) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 ks[AES_BLOCK_SIZE];

		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
		if (tdst != tsrc)
			memcpy(tdst, tsrc, nbytes);
		crypto_xor(tdst, ks, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #23
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[CAST5_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__cast5_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
Example #24
static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
{
    struct aead_request *req = areq->data;
    struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);

    if (!err) {
        err = gcm_hash(req, pctx);
        if (err == -EINPROGRESS || err == -EBUSY)
            return;
        else if (!err) {
            crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
            gcm_enc_copy_hash(req, pctx);
        }
    }

    aead_request_complete(req, err);
}
Example #25
File: ctr.c Project: 03199618/linux
static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, bsize);
}
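The encrypt-counter, XOR-into-keystream, then memcpy pattern in the final-block helpers above can also be written with crypto_xor_cpy(), the copying variant already used in Examples #6 and #30. A hedged sketch of the same tail handling in that style; ctr_final_sketch and its parameters are illustrative, and the 16-byte keystream buffer assumes an AES-sized block cipher.

/* Sketch: encrypt the counter block, then XOR the keystream directly
 * into dst without the intermediate memcpy. */
static void ctr_final_sketch(struct crypto_cipher *tfm, u8 *ctrblk,
			     const u8 *src, u8 *dst, unsigned int nbytes)
{
	u8 keystream[16];

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);	/* dst = src ^ keystream */
	crypto_inc(ctrblk, 16);
}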
Example #26
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
                struct blkcipher_walk *walk)
{
    u8 *ctrblk = walk->iv;
    u8 keystream[AES_BLOCK_SIZE];
    u8 *src = walk->src.virt.addr;
    u8 *dst = walk->dst.virt.addr;
    unsigned int nbytes = walk->nbytes;

    pagefault_disable();
    enable_kernel_altivec();
    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
    pagefault_enable();

    crypto_xor(keystream, src, nbytes);
    memcpy(dst, keystream, nbytes);
    crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
Example #27
static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
		kernel_neon_begin();
		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *dst = walk.dst.virt.addr;
		u8 *src = walk.src.virt.addr;
		u8 bk[2][AES_BLOCK_SIZE];
		u8 *iv = walk.iv;

		do {
			if (walk.dst.virt.addr == walk.src.virt.addr)
				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);

			AES_decrypt(src, dst, &ctx->dec.rk);
			crypto_xor(dst, iv, AES_BLOCK_SIZE);

			if (walk.dst.virt.addr == walk.src.virt.addr)
				iv = bk[blocks & 1];
			else
				iv = src;

			dst += AES_BLOCK_SIZE;
			src += AES_BLOCK_SIZE;
		} while (--blocks);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #28
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
                             struct dm_crypt_request *dmreq)
{
    u8 *dst;
    int r;

    if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
        return 0;

    dst = kmap_atomic(sg_page(&dmreq->sg_out));
    r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

    /* Tweak the first block of plaintext sector */
    if (!r)
        crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

    kunmap_atomic(dst);
    return r;
}
Example #29
static void get_data_to_compute(struct crypto_cipher *tfm,
			       struct crypto_ccm_req_priv_ctx *pctx,
			       struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;
	u8 *data_src;
	int n;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		data_src = scatterwalk_map(&walk);

		compute_mac(tfm, data_src, n, pctx);
		len -= n;

		scatterwalk_unmap(data_src);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
		if (len)
			crypto_yield(pctx->flags);
	}

	/* any leftover needs padding and then encrypted */
	if (pctx->ilen) {
		int padlen;
		u8 *odata = pctx->odata;
		u8 *idata = pctx->idata;

		padlen = 16 - pctx->ilen;
		memset(idata + pctx->ilen, 0, padlen);
		crypto_xor(odata, idata, 16);
		crypto_cipher_encrypt_one(tfm, odata, odata);
		pctx->ilen = 0;
	}
}
Example #30
static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
				       struct skcipher_walk *walk,
				       struct crypto_cipher *tfm)
{
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		/* C = E(P XOR IV) */
		crypto_xor(iv, src, bsize);
		crypto_cipher_encrypt_one(tfm, dst, iv);
		/* PCBC chaining: next IV = P XOR C, built in one pass */
		crypto_xor_cpy(iv, dst, src, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
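Every example above hinges on crypto_xor() and crypto_xor_cpy(). As a reference for reading them, here is a minimal byte-wise sketch of the two helpers; the in-tree versions are optimized to work a word at a time, so this only illustrates the semantics, and xor_sketch/xor_cpy_sketch are illustrative names.

/* dst[i] ^= src[i] for 0 <= i < size */
static void xor_sketch(u8 *dst, const u8 *src, unsigned int size)
{
	while (size--)
		*dst++ ^= *src++;
}

/* dst[i] = src1[i] ^ src2[i] for 0 <= i < size */
static void xor_cpy_sketch(u8 *dst, const u8 *src1, const u8 *src2,
			   unsigned int size)
{
	while (size--)
		*dst++ = *src1++ ^ *src2++;
}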