Example #1
File: ofb.c Project: avagin/linux
static int crypto_ofb_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	const unsigned int bsize = crypto_cipher_blocksize(cipher);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		u8 * const iv = walk.iv;
		unsigned int nbytes = walk.nbytes;

		do {
			crypto_cipher_encrypt_one(cipher, iv, iv);
			crypto_xor_cpy(dst, src, iv, bsize);
			dst += bsize;
			src += bsize;
		} while ((nbytes -= bsize) >= bsize);

		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
		crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
			       walk.nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}
Example #2
File: ctr.c Project: Anjali05/linux
static int crypto_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	const unsigned int bsize = crypto_cipher_blocksize(cipher);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, cipher);

		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, cipher);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
Example #3
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			              nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}
Example #4
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
Example #5
static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
		   ctx->rounds, blocks);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}
Example #6
static int chacha20_neon(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
		return crypto_chacha20_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);

	crypto_chacha20_init(state, ctx, walk.iv);

	kernel_neon_begin();
	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
				nbytes);
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}
	kernel_neon_end();

	return err;
}
Example #7
static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->key.rk, ctx->key.rounds, blocks,
				  walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}
Example #8
static void crypto_aegis256_aesni_process_crypt(
		struct aegis_state *state, struct aead_request *req,
		const struct aegis_crypt_ops *ops)
{
	struct skcipher_walk walk;
	u8 *src, *dst;
	unsigned int chunksize, base;

	ops->skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		src = walk.src.virt.addr;
		dst = walk.dst.virt.addr;
		chunksize = walk.nbytes;

		ops->crypt_blocks(state, chunksize, src, dst);

		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
		src += base;
		dst += base;
		chunksize &= AEGIS256_BLOCK_SIZE - 1;

		if (chunksize > 0)
			ops->crypt_tail(state, chunksize, src, dst);

		skcipher_walk_done(&walk, 0);
	}
}
Example #9
File: lrw.c Project: avagin/linux
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the next_index() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}
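The next_index() helper used in the inner loop above is not included in this listing. What follows is only a rough sketch of what such a helper plausibly looks like, consistent with the comment at the top of xor_tweak() and with ctx->mulinc[] being a table of precomputed multiplications; the counter layout (four 32-bit words, least significant first) and the use of ffz() are assumptions, not code taken from this page.

/*
 * Sketch only (not part of this listing): increment the 128-bit block
 * counter, kept as four 32-bit words with the least significant word first,
 * and return the position of the bit that flips from 0 to 1. That position
 * selects which precomputed ctx->mulinc[] entry to XOR into the tweak.
 * ffz() is the find-first-zero-bit helper from <linux/bitops.h>.
 */
static int next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/* Counter wrapped from all ones back to all zeros. */
	return 127;
}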
Example #10
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
Example #11
static int chacha20_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *state, state_buf[16 + 2] __aligned(8);
	struct skcipher_walk walk;
	int err;

	BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN);

	if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
		return crypto_chacha20_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);

	crypto_chacha20_init(state, ctx, walk.iv);

	kernel_fpu_begin();

	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
		err = skcipher_walk_done(&walk,
					 walk.nbytes % CHACHA20_BLOCK_SIZE);
	}

	if (walk.nbytes) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = skcipher_walk_done(&walk, 0);
	}

	kernel_fpu_end();

	return err;
}
Example #12
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
Example #13
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, blocks;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Tell aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		ce_aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc,
				   num_rounds(ctx), blocks, walk.iv);
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_neon_end();

	return err;
}
Example #14
static int crypto_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		err = crypto_cbc_decrypt_blocks(&walk, tfm,
						crypto_cbc_decrypt_one);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}
Example #15
static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}
Example #16
static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key1.key_dec, rounds, blocks,
				   walk.iv, (u8 *)ctx->key2.key_enc, first);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}
Example #17
static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
							     child);
		else
			nbytes = crypto_pcbc_encrypt_segment(req, &walk,
							     child);
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}
Example #18
static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, first = 1;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		/* fall back to the non-bitsliced NEON implementation */
		neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				     ctx->enc, ctx->key.rounds, blocks, walk.iv,
				     first);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
		first = 0;
	}
	kernel_neon_end();
	return err;
}
Example #19
static int chacha_stream_xor(struct skcipher_request *req,
			     struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);

		chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
			       nbytes, ctx->nrounds);
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}