Example #1
static int encrypt(struct blkcipher_desc *desc,
		   struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 64);

	salsa20_ivsetup(ctx, walk.iv);

	while (walk.nbytes >= 64) {
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr,
				      walk.nbytes - (walk.nbytes % 64));
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
	}

	if (walk.nbytes) {
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr, walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
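
Every example in this collection follows the same skeleton: initialize the walk, request chunks of at least one block via blkcipher_walk_virt_block(), process the complete blocks of each chunk, hand the unprocessed remainder back through blkcipher_walk_done(), and finally deal with the sub-block tail that the walker leaves mapped. A minimal sketch of that skeleton, assuming a hypothetical stream-cipher primitive my_stream_crypt() and block size MY_BLOCK_SIZE:

static int example_crypt(struct blkcipher_desc *desc,
			 struct scatterlist *dst, struct scatterlist *src,
			 unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	/* ask the walker for chunks of at least one block */
	err = blkcipher_walk_virt_block(desc, &walk, MY_BLOCK_SIZE);

	while (walk.nbytes >= MY_BLOCK_SIZE) {
		/* process all complete blocks of this chunk */
		my_stream_crypt(walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes - (walk.nbytes % MY_BLOCK_SIZE),
				walk.iv);
		/* report the unprocessed remainder back to the walker */
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % MY_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		/* final partial block, already mapped by the walker */
		my_stream_crypt(walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes, walk.iv);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}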
Example #2
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
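
A note on the desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP line above: between kernel_fpu_begin() and kernel_fpu_end() the task must not sleep, and (as far as I can tell) the legacy walker consults this flag to choose between GFP_KERNEL and GFP_ATOMIC when blkcipher_walk_done() has to allocate a bounce buffer. A sketch of the constraint, with comments giving my reading rather than anything stated in the driver:

	/* force atomic allocations: the walker may allocate inside the
	 * FPU region below, where sleeping is not allowed (assumption:
	 * this flag selects GFP_KERNEL vs. GFP_ATOMIC in the walker) */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();		/* preemption off, no sleeping */
	/* ... per-chunk work, including blkcipher_walk_done() ... */
	kernel_fpu_end();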
Example #3
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
    struct scatterlist *dst, struct scatterlist *src,
    unsigned int nbytes)
{
    int ret;
    struct blkcipher_walk walk;
    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(
            crypto_blkcipher_tfm(desc->tfm));
    struct blkcipher_desc fallback_desc = {
        .tfm = ctx->fallback,
        .info = desc->info,
        .flags = desc->flags
    };

    if (in_interrupt()) {
        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
    } else {
        blkcipher_walk_init(&walk, dst, src, nbytes);
        ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
            pagefault_disable();
            enable_kernel_altivec();
            aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
                (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
            pagefault_enable();

            crypto_inc(walk.iv, AES_BLOCK_SIZE);
            nbytes &= AES_BLOCK_SIZE - 1;
            ret = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
            p8_aes_ctr_final(ctx, &walk);
            ret = blkcipher_walk_done(desc, &walk, 0);
        }
    }

    return ret;
}

struct crypto_alg p8_aes_ctr_alg = {
    .cra_name = "ctr(aes)",
    .cra_driver_name = "p8_aes_ctr",
    .cra_module = THIS_MODULE,
    .cra_priority = 1000,
    .cra_type = &crypto_blkcipher_type,
    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
    .cra_alignmask = 0,
    .cra_blocksize = 1,
    .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
    .cra_init = p8_aes_ctr_init,
    .cra_exit = p8_aes_ctr_exit,
    .cra_blkcipher = {
        .ivsize = 0,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = p8_aes_ctr_setkey,
        .encrypt = p8_aes_ctr_crypt,
        .decrypt = p8_aes_ctr_crypt,
    },
};
Example #4
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	cast5_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #5
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
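
crypto_ctr_crypt_final() is not shown above, but what it must do follows directly from the CTR construction: run the block cipher once over the current counter block to produce keystream, then XOR just the remaining bytes. A sketch under that assumption (a real implementation additionally respects the cipher's alignmask when placing the keystream buffer):

static void ctr_final_sketch(struct blkcipher_walk *walk,
			     struct crypto_cipher *tfm)
{
	u8 keystream[16];	/* assumes block size <= 16 bytes */
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;

	/* one extra block-cipher call turns the counter into keystream */
	crypto_cipher_encrypt_one(tfm, keystream, walk->iv);
	/* XOR only the bytes that are actually left (< one block) */
	crypto_xor(keystream, src, walk->nbytes);
	memcpy(dst, keystream, walk->nbytes);
}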
Example #6
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #7
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
			 struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	u32 state[16];
	int err;

	if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
		return crypto_chacha20_crypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

	kernel_neon_begin();

	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % CHACHA20_BLOCK_SIZE);
	}

	if (walk.nbytes) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	kernel_neon_end();

	return err;
}
Example #8
static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			  struct blkcipher_walk *walk)
{
	struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int nbytes, n, k;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
				walk->dst.virt.addr, walk->src.virt.addr,
				n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			ret = blkcipher_walk_done(desc, walk, nbytes - n);
		}
		if (k < n) {
			if (__ctr_paes_set_key(ctx) != 0) {
				if (locked)
					spin_unlock(&ctrblk_lock);
				return blkcipher_walk_done(desc, walk, -EIO);
			}
		}
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		while (1) {
			if (cpacf_kmctr(ctx->fc | modifier,
					ctx->pk.protkey, buf,
					walk->src.virt.addr, AES_BLOCK_SIZE,
					walk->iv) == AES_BLOCK_SIZE)
				break;
			if (__ctr_paes_set_key(ctx) != 0)
				return blkcipher_walk_done(desc, walk, -EIO);
		}
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}
Example #9
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[AES_BLOCK_SIZE];
	u8 *out, *in;

	if (!walk->nbytes)
		return ret;

	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			/* only use complete blocks, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
						 nbytes & ~(AES_BLOCK_SIZE - 1);
			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
			if (ret < 0 || ret != n)
				return -EIO;
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrblk, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
				       AES_BLOCK_SIZE, ctrblk);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
	return ret;
}
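
The inner loop above is a batching trick: ctrblk is a page-sized buffer that gets filled with consecutive counter values, so a single crypt_s390_kmctr() call can consume up to PAGE_SIZE of data instead of one block at a time. A minimal sketch of just that fill step, extracted from the loop (the helper name is mine):

/* fill buf with n / AES_BLOCK_SIZE consecutive counter blocks */
static void fill_ctr_blocks(u8 *buf, const u8 *iv, unsigned int n)
{
	unsigned int i;

	memcpy(buf, iv, AES_BLOCK_SIZE);
	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
		memcpy(buf + i, buf + i - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		crypto_inc(buf + i, AES_BLOCK_SIZE);
	}
}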
Example #10
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	u64 inc;
	struct blkcipher_walk walk;
	struct p8_aes_ctr_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
						    walk.dst.virt.addr,
						    (nbytes &
						     AES_BLOCK_MASK) /
						    AES_BLOCK_SIZE,
						    &ctx->enc_key,
						    walk.iv);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/* advance the IV past the full blocks just processed */
			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
			if (inc > 0)
				while (inc--)
					crypto_inc(walk.iv, AES_BLOCK_SIZE);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
		if (walk.nbytes) {
			p8_aes_ctr_final(ctx, &walk);
			ret = blkcipher_walk_done(desc, &walk, 0);
		}
	}

	return ret;
}
Example #11
static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	u32 blocks;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		__be32 *ctr = (__be32 *)walk.iv;
		u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);

		/* avoid 32 bit counter overflow in the NEON code */
		if (unlikely(headroom < blocks)) {
			blocks = headroom + 1;
			tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
		}
		kernel_neon_begin();
		bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
					   walk.dst.virt.addr, blocks,
					   &ctx->enc, walk.iv);
		kernel_neon_end();
		inc_be128_ctr(ctr, blocks);

		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
			break;

		err = blkcipher_walk_done(desc, &walk, tail);
	}
	if (walk.nbytes) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 ks[AES_BLOCK_SIZE];

		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
		if (tdst != tsrc)
			memcpy(tdst, tsrc, nbytes);
		crypto_xor(tdst, ks, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
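
The headroom clamp above is easy to misread, so here is a worked instance with illustrative values. headroom counts how many times the low 32-bit counter word can still be incremented before it wraps; the clamp makes the NEON call stop right after the 0xffffffff block, and inc_be128_ctr() then carries the wrap into the upper counter words in plain C:

	u32 ctr_lo   = 0xfffffffe;		/* low word of the BE counter */
	u32 headroom = UINT_MAX - ctr_lo;	/* == 1: one increment left */
	u32 blocks   = 5;

	if (headroom < blocks)
		blocks = headroom + 1;		/* == 2: use ...fe and ...ff */
	/* the other 3 blocks stay in the walk as tail for the next pass */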
Example #12
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist    *dst,
		      struct scatterlist    *src,
		      unsigned int           nbytes,
		      u8                    *iv)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	struct blkcipher_walk walk;
	int rc;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	if (rc)
		goto out;

	if (iv)
		memcpy(iv, walk.iv, AES_BLOCK_SIZE);

	while (walk.nbytes) {
		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
					   walk.nbytes, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
					    walk.nbytes, nx_ctx->ap->sglen);

		rc = blkcipher_walk_done(desc, &walk, 0);
		if (rc)
			break;
	}

	if (walk.nbytes) {
		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
					   walk.nbytes, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
					    walk.nbytes, nx_ctx->ap->sglen);

		rc = 0;
	}

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
out:
	return rc;
}
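
For context, a hypothetical caller: only nx_build_sg_lists() itself and the op.inlen/op.outlen fields it fills come from the code above, while submit_nx_op() is a placeholder for the driver's real coprocessor submission path:

static int nx_crypt_sketch(struct nx_crypto_ctx *nx_ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes, u8 *iv)
{
	int rc;

	/* one pass builds nx_ctx->in_sg/out_sg and copies out the IV */
	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, iv);
	if (rc)
		return rc;

	/* nx_ctx->op now describes both scatterlists (as negative
	 * lengths, per the comment above); hand it to the coprocessor */
	return submit_nx_op(nx_ctx);	/* hypothetical helper */
}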
Example #13
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[DES_BLOCK_SIZE];
	u8 *out, *in;

	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= DES_BLOCK_SIZE) {
			/* align to block size, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
				nbytes & ~(DES_BLOCK_SIZE - 1);
			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
			BUG_ON((ret < 0) || (ret != n));
			if (n > DES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
			crypto_inc(ctrblk, DES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
				       DES_BLOCK_SIZE, ctrblk);
		BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
	return ret;
}
Example #14
static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);

	first = 1;
	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
				first);
		first = 0;
		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
			break;
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	if (nbytes) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 __aligned(8) tail[AES_BLOCK_SIZE];

		/*
		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
		 * to tell aes_ctr_encrypt() to only read half a block.
		 */
		blocks = (nbytes <= 8) ? -1 : 1;

		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
				blocks, walk.iv, first);
		memcpy(tdst, tail, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_neon_end();

	return err;
}
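
Two details of the tail path above are worth spelling out: the assembler writes into an on-stack tail buffer first, so a full-block store can never overrun dst, and the negative block count signals a short read. A commented restatement (my interpretation of the source comment, not an independent description of the asm contract):

	u8 __aligned(8) tail[AES_BLOCK_SIZE];	/* full-block scratch output */

	/* -1 tells aes_ctr_encrypt() to load only 8 source bytes */
	blocks = (nbytes <= 8) ? -1 : 1;
	aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds, blocks,
			walk.iv, first);
	memcpy(tdst, tail, nbytes);	/* copy back only the real bytes */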
Example #15
static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
		kernel_neon_begin();
		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *dst = walk.dst.virt.addr;
		u8 *src = walk.src.virt.addr;
		u8 bk[2][AES_BLOCK_SIZE];
		u8 *iv = walk.iv;

		do {
			if (walk.dst.virt.addr == walk.src.virt.addr)
				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);

			AES_decrypt(src, dst, &ctx->dec.rk);
			crypto_xor(dst, iv, AES_BLOCK_SIZE);

			if (walk.dst.virt.addr == walk.src.virt.addr)
				iv = bk[blocks & 1];
			else
				iv = src;

			dst += AES_BLOCK_SIZE;
			src += AES_BLOCK_SIZE;
		} while (--blocks);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #16
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);

	while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #17
static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	/* generate the initial tweak */
	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

	while (walk.nbytes) {
		kernel_neon_begin();
		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #18
static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
			    struct blkcipher_walk *walk)
{
	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[DES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		n = DES_BLOCK_SIZE;
		if (nbytes >= 2*DES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
			    walk->src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
				DES_BLOCK_SIZE);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
			    DES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	return ret;
}
Example #19
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
                         struct scatterlist *src, unsigned int nbytes)
{
    u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
    struct blkcipher_walk walk;
    int err;

    if (!may_use_simd())
        return crypto_chacha20_crypt(desc, dst, src, nbytes);

    state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

    crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

    kernel_fpu_begin();

    while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % CHACHA20_BLOCK_SIZE);
    }

    if (walk.nbytes) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        walk.nbytes);
        err = blkcipher_walk_done(desc, &walk, 0);
    }

    kernel_fpu_end();

    return err;
}
Example #20
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {

#ifdef CONFIG_CRYPTO_DEV_REALTEK
		if (ctx->rtl_ctx.mode >= 0) {
			int i, over_flag = 0;
			unsigned int one = 0, len = 0;
			u8 over_iv[32] = {0};
			u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			/* The hw engine handles CTRBLK overflow differently
			 * from the Linux kernel.
			 * hw engine: CTRBLK := NONCE || IV || ONE, with a
			 * 4-byte NONCE, 8-byte IV and 4-byte ONE, and only
			 * ONE (4 bytes) is treated as the counter.
			 * The Linux kernel instead treats the entire counter
			 * block as the counter.
			 */
			one = *((unsigned int *)(walk.iv + bsize - 4));
			for (i = 0; i < (walk.nbytes / bsize); i++) {
				if (one == 0xffffffff) {
					over_flag = 1;
					break;
				}
				one++;
			}
			if (over_flag) {
				/* process the blocks before ONE overflows */
				len = bsize * (i + 1);
				nbytes = rtl_cipher_crypt(child, 1,
						&ctx->rtl_ctx,
						walk.src.virt.addr, len,
						walk.iv, walk.dst.virt.addr);
				src += (len - nbytes);
				dst += (len - nbytes);

				/* ONE overflowed: carry into NONCE || IV and
				 * restart the 4-byte counter at zero */
				memcpy(over_iv, walk.iv, bsize - 4);
				crypto_inc(over_iv, bsize - 4);
				memcpy(walk.iv, over_iv, bsize);

				nbytes = rtl_cipher_crypt(child, 1,
						&ctx->rtl_ctx, src,
						walk.nbytes - len,
						walk.iv, dst);

				/* increment counter in counterblock */
				for (i = 0; i < ((walk.nbytes - len) / bsize); i++)
					crypto_inc(walk.iv, bsize);

				if (walk.src.virt.addr == walk.dst.virt.addr) {
					src += ((walk.nbytes - len) - nbytes);
				} else {
					src += ((walk.nbytes - len) - nbytes);
					dst += ((walk.nbytes - len) - nbytes);
				}
			} else {
				nbytes = rtl_cipher_crypt(child, 1,
						&ctx->rtl_ctx,
						walk.src.virt.addr,
						walk.nbytes, walk.iv,
						walk.dst.virt.addr);
				if (walk.src.virt.addr == walk.dst.virt.addr) {
					walk.src.virt.addr += (walk.nbytes - nbytes);
				} else {
					walk.src.virt.addr += (walk.nbytes - nbytes);
					walk.dst.virt.addr += (walk.nbytes - nbytes);
				}
				/* increment counter in counterblock */
				for (i = 0; i < (walk.nbytes / bsize); i++)
					crypto_inc(walk.iv, bsize);
			}

			err = blkcipher_walk_done(desc, &walk, nbytes);
			continue;
		}
#endif

		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
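
The comment in the Realtek branch is the crux of that whole workaround: the kernel's crypto_inc() lets carries ripple through the entire counter block, while the hw engine wraps its 4-byte ONE field in isolation. A standalone illustration of the two conventions (plain C, helper names are mine):

#include <stdint.h>

/* crypto_inc() semantics: big-endian increment over the whole block */
static void inc_whole_block(uint8_t *ctr, unsigned int len)
{
	while (len--)
		if (++ctr[len])		/* stop once a byte did not wrap */
			break;
}

/* hw-engine semantics: only the trailing 4-byte ONE counts, and a
 * wrap does not carry into the NONCE || IV bytes in front of it */
static void inc_trailing_one(uint8_t *ctr16)
{
	inc_whole_block(ctr16 + 12, 4);	/* bytes 12..15 only */
}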
Example #21
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
			    struct s390_des_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	unsigned int n, nbytes;
	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
	u8 *out, *in, *ctrptr = ctrbuf;

	if (!walk->nbytes)
		return ret;

	if (spin_trylock(&ctrblk_lock))
		ctrptr = ctrblk;

	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= DES_BLOCK_SIZE) {
			if (ctrptr == ctrblk)
				n = __ctrblk_init(ctrptr, nbytes);
			else
				n = DES_BLOCK_SIZE;
			ret = crypt_s390_kmctr(func, ctx->key, out, in,
					       n, ctrptr);
			if (ret < 0 || ret != n) {
				if (ctrptr == ctrblk)
					spin_unlock(&ctrblk_lock);
				return -EIO;
			}
			if (n > DES_BLOCK_SIZE)
				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
			crypto_inc(ctrptr, DES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	if (ctrptr == ctrblk) {
		if (nbytes)
			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
		else
			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
		spin_unlock(&ctrblk_lock);
	} else {
		if (!nbytes)
			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
	}
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
				       DES_BLOCK_SIZE, ctrbuf);
		if (ret < 0 || ret != DES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
	}
	return ret;
}