Example #1
static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			  struct blkcipher_walk *walk)
{
	struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int nbytes, n, k;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
				walk->dst.virt.addr, walk->src.virt.addr,
				n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			ret = blkcipher_walk_done(desc, walk, nbytes - n);
		}
		if (k < n) {
			if (__ctr_paes_set_key(ctx) != 0) {
				if (locked)
					spin_unlock(&ctrblk_lock);
				return blkcipher_walk_done(desc, walk, -EIO);
			}
		}
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		while (1) {
			if (cpacf_kmctr(ctx->fc | modifier,
					ctx->pk.protkey, buf,
					walk->src.virt.addr, AES_BLOCK_SIZE,
					walk->iv) == AES_BLOCK_SIZE)
				break;
			if (__ctr_paes_set_key(ctx) != 0)
				return blkcipher_walk_done(desc, walk, -EIO);
		}
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}
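The routine above (and the related s390 routine in Example #14) only batches blocks through the shared ctrblk buffer when spin_trylock() succeeds; otherwise it falls back to one block at a time using the caller's IV directly. Below is a minimal userspace sketch of that opportunistic-locking pattern, using a pthread mutex in place of the kernel spinlock; all names are illustrative, not kernel API.

/*
 * Sketch: use the shared multi-block buffer only if the lock is free,
 * otherwise process block by block with the request's own IV.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_request(unsigned int blocks)
{
	int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);

	if (locked && blocks >= 2)
		printf("batching %u blocks via the shared counter buffer\n",
		       blocks);
	else
		printf("processing %u blocks one at a time\n", blocks);

	if (locked)
		pthread_mutex_unlock(&ctrblk_lock);
}

int main(void)
{
	process_request(8);
	return 0;
}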
Example #2
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[AES_BLOCK_SIZE];
	u8 *out, *in;

	if (!walk->nbytes)
		return ret;

	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			/* only use complete blocks, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
						 nbytes & ~(AES_BLOCK_SIZE - 1);
			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
			if (ret < 0 || ret != n)
				return -EIO;
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrblk, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
				       AES_BLOCK_SIZE, ctrblk);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
	return ret;
}
Example #3
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[DES_BLOCK_SIZE];
	u8 *out, *in;

	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= DES_BLOCK_SIZE) {
			/* align to block size, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
				nbytes & ~(DES_BLOCK_SIZE - 1);
			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
			BUG_ON((ret < 0) || (ret != n));
			if (n > DES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
			crypto_inc(ctrblk, DES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
				       DES_BLOCK_SIZE, ctrblk);
		BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
	return ret;
}
Example #4
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
    struct scatterlist *dst, struct scatterlist *src,
    unsigned int nbytes)
{
    int ret;
    struct blkcipher_walk walk;
    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(
            crypto_blkcipher_tfm(desc->tfm));
    struct blkcipher_desc fallback_desc = {
        .tfm = ctx->fallback,
        .info = desc->info,
        .flags = desc->flags
    };

    if (in_interrupt()) {
        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
    } else {
        blkcipher_walk_init(&walk, dst, src, nbytes);
        ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
            pagefault_disable();
            enable_kernel_altivec();
            aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
                (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
            pagefault_enable();

            crypto_inc(walk.iv, AES_BLOCK_SIZE);
            nbytes &= AES_BLOCK_SIZE - 1;
            ret = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
            p8_aes_ctr_final(ctx, &walk);
            ret = blkcipher_walk_done(desc, &walk, 0);
        }
    }

    return ret;
}

struct crypto_alg p8_aes_ctr_alg = {
    .cra_name = "ctr(aes)",
    .cra_driver_name = "p8_aes_ctr",
    .cra_module = THIS_MODULE,
    .cra_priority = 1000,
    .cra_type = &crypto_blkcipher_type,
    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
    .cra_alignmask = 0,
    .cra_blocksize = 1,
    .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
    .cra_init = p8_aes_ctr_init,
    .cra_exit = p8_aes_ctr_exit,
    .cra_blkcipher = {
        .ivsize = 0,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = p8_aes_ctr_setkey,
        .encrypt = p8_aes_ctr_crypt,
        .decrypt = p8_aes_ctr_crypt,
    },
};
Example #5
static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
Example #6
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
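Examples #5 and #6 both lean on two kernel helpers: crypto_inc(), which steps the counter block as one big-endian integer, and crypto_xor(), which XORs the keystream into the data. The following standalone userspace sketch illustrates those semantics with purely illustrative names; it is not the kernel API.

/* Sketch of big-endian counter increment and byte-wise XOR. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Increment an n-byte counter block as a big-endian integer. */
static void ctr_inc_sketch(uint8_t *ctr, size_t n)
{
	while (n--) {
		if (++ctr[n] != 0)	/* stop once a byte does not wrap */
			break;
	}
}

/* XOR src into dst, byte by byte. */
static void xor_sketch(uint8_t *dst, const uint8_t *src, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	uint8_t ctr[16] = { 0 };
	uint8_t data[4] = { 0x00, 0x01, 0x02, 0x03 };
	uint8_t pad[4]  = { 0xff, 0xff, 0xff, 0xff };
	size_t i;

	memset(ctr + 12, 0xff, 4);	  /* low 32 bits all ones */
	ctr_inc_sketch(ctr, sizeof(ctr)); /* carry runs into byte 11 */
	for (i = 0; i < sizeof(ctr); i++)
		printf("%02x", ctr[i]);
	printf("\n");

	xor_sketch(data, pad, sizeof(data)); /* data becomes ff fe fd fc */
	for (i = 0; i < sizeof(data); i++)
		printf("%02x ", data[i]);
	printf("\n");
	return 0;
}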
Example #7
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
	unsigned int i, n;

	/* align to block size, max. PAGE_SIZE */
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
	}
	return n;
}
Example #8
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	u64 inc;
	struct blkcipher_walk walk;
	struct p8_aes_ctr_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
						    walk.dst.virt.addr,
						    (nbytes &
						     AES_BLOCK_MASK) /
						    AES_BLOCK_SIZE,
						    &ctx->enc_key,
						    walk.iv);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/* We need to update IV mostly for last bytes/round */
			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
			if (inc > 0)
				while (inc--)
					crypto_inc(walk.iv, AES_BLOCK_SIZE);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
		if (walk.nbytes) {
			p8_aes_ctr_final(ctx, &walk);
			ret = blkcipher_walk_done(desc, &walk, 0);
		}
	}

	return ret;
}
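The loop above advances walk.iv by calling crypto_inc() once per full block processed. Repeating a single-step increment n times is equivalent to adding n to the counter as a big-endian integer, as this small standalone sketch shows (illustrative names, not kernel code).

/* Sketch: add 'blocks' to a big-endian counter in one pass. */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 16

static void ctr_add_sketch(uint8_t *ctr, uint64_t blocks)
{
	uint64_t carry = blocks;
	int i;

	for (i = BLOCK_SIZE - 1; i >= 0 && carry; i--) {
		carry += ctr[i];
		ctr[i] = (uint8_t)carry;
		carry >>= 8;
	}
}

int main(void)
{
	uint8_t ctr[BLOCK_SIZE] = { 0 };

	ctr[15] = 0xfe;
	ctr_add_sketch(ctr, 5);	/* 0x..00fe + 5 = 0x..0103 */
	printf("%02x %02x\n", ctr[14], ctr[15]);	/* prints "01 03" */
	return 0;
}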
Example #9
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
	}
	return n;
}
Example #10
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* align to block size, max. PAGE_SIZE */
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
	memcpy(ctrptr, iv, DES_BLOCK_SIZE);
	for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
		crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
		ctrptr += DES_BLOCK_SIZE;
	}
	return n;
}
Example #11
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
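The *_crypt_final() helpers such as the one above all follow the same shape: encrypt one counter block into a keystream buffer, XOR only the remaining nbytes into the output, and still advance the counter by a whole block. Below is a minimal userspace sketch of that tail handling; the keystream block is passed in precomputed so no real cipher is needed, and all names are illustrative.

/* Sketch of CTR final-block handling for a partial last block. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

static void ctr_inc_sketch(uint8_t *ctr, size_t n)
{
	while (n--)
		if (++ctr[n] != 0)
			break;
}

/*
 * keystream must already hold E_k(ctr); only nbytes (< BLOCK_SIZE) of it
 * are consumed, but the counter still advances by one full block.
 */
static void ctr_final_sketch(uint8_t *dst, const uint8_t *src,
			     const uint8_t *keystream,
			     unsigned int nbytes, uint8_t *ctr)
{
	unsigned int i;

	for (i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ keystream[i];
	ctr_inc_sketch(ctr, BLOCK_SIZE);
}

int main(void)
{
	uint8_t ctr[BLOCK_SIZE] = { 0 };
	uint8_t ks[BLOCK_SIZE];
	uint8_t src[4] = { 't', 'a', 'i', 'l' };
	uint8_t dst[4] = { 0 };

	memset(ks, 0xaa, sizeof(ks));	/* stands in for E_k(ctr) */
	ctr_final_sketch(dst, src, ks, sizeof(src), ctr);
	printf("last counter byte after final block: %u\n", ctr[15]);
	return 0;
}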
Example #12
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
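The __ctrblk_init() variants above all prepare a page-sized run of consecutive counter values so that a single CPACF call can process many blocks at once. A standalone sketch of that pre-fill, assuming 16-byte blocks and a 4096-byte buffer standing in for PAGE_SIZE (illustrative names, not kernel code):

/* Sketch: pre-fill a buffer with successive big-endian counter blocks. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16
#define BUF_SIZE   4096		/* stands in for PAGE_SIZE */

static void ctr_inc_sketch(uint8_t *ctr, size_t n)
{
	while (n--)
		if (++ctr[n] != 0)
			break;
}

/* Returns the number of bytes of counter material actually prepared. */
static unsigned int ctrblk_init_sketch(uint8_t *buf, const uint8_t *iv,
				       unsigned int nbytes)
{
	unsigned int i, n;

	/* only complete blocks, capped at the buffer size */
	n = (nbytes > BUF_SIZE) ? BUF_SIZE : nbytes & ~(BLOCK_SIZE - 1);
	memcpy(buf, iv, BLOCK_SIZE);
	for (i = BLOCK_SIZE; i < n; i += BLOCK_SIZE) {
		memcpy(buf + i, buf + i - BLOCK_SIZE, BLOCK_SIZE);
		ctr_inc_sketch(buf + i, BLOCK_SIZE);
	}
	return n;
}

int main(void)
{
	uint8_t iv[BLOCK_SIZE] = { [15] = 1 };
	static uint8_t buf[BUF_SIZE];
	unsigned int n = ctrblk_init_sketch(buf, iv, 64);

	printf("prepared %u bytes (%u counter blocks)\n", n, n / BLOCK_SIZE);
	return 0;
}

After the multi-block call, the s390 routines in Examples #1 and #14 copy the last counter value that was actually consumed back out of this buffer and increment it once, so walk->iv again points one block past the data just processed.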
Example #13
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
Example #14
static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
			    struct blkcipher_walk *walk)
{
	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[DES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		n = DES_BLOCK_SIZE;
		if (nbytes >= 2*DES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
			    walk->src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
				DES_BLOCK_SIZE);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
			    DES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	return ret;
}
Example #15
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[DES3_EDE_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	des3_ede_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}
Example #16
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[CAST5_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	__cast5_encrypt(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
Example #17
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, bsize);
}
Example #18
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
                struct blkcipher_walk *walk)
{
    u8 *ctrblk = walk->iv;
    u8 keystream[AES_BLOCK_SIZE];
    u8 *src = walk->src.virt.addr;
    u8 *dst = walk->dst.virt.addr;
    unsigned int nbytes = walk->nbytes;

    pagefault_disable();
    enable_kernel_altivec();
    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
    pagefault_enable();

    crypto_xor(keystream, src, nbytes);
    memcpy(dst, keystream, nbytes);
    crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
Example #19
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
			    struct s390_des_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	unsigned int n, nbytes;
	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
	u8 *out, *in, *ctrptr = ctrbuf;

	if (!walk->nbytes)
		return ret;

	if (spin_trylock(&ctrblk_lock))
		ctrptr = ctrblk;

	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= DES_BLOCK_SIZE) {
			if (ctrptr == ctrblk)
				n = __ctrblk_init(ctrptr, nbytes);
			else
				n = DES_BLOCK_SIZE;
			ret = crypt_s390_kmctr(func, ctx->key, out, in,
					       n, ctrptr);
			if (ret < 0 || ret != n) {
				if (ctrptr == ctrblk)
					spin_unlock(&ctrblk_lock);
				return -EIO;
			}
			if (n > DES_BLOCK_SIZE)
				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
			crypto_inc(ctrptr, DES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	if (ctrptr == ctrblk) {
		if (nbytes)
			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
		else
			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
		spin_unlock(&ctrblk_lock);
	} else {
		if (!nbytes)
			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
	}
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
				       DES_BLOCK_SIZE, ctrbuf);
		if (ret < 0 || ret != DES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
	}
	return ret;
}
Example #20
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {

#ifdef CONFIG_CRYPTO_DEV_REALTEK
		if (ctx->rtl_ctx.mode >= 0)
		{
			int i, over_flag = 0;
			unsigned int one = 0, len = 0;			
			u8 over_iv[32] = {0};
			u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			/* The hw engine handles CTRBLK overflow differently from the
			 * Linux kernel:
			 *  hw engine: CTRBLK := NONCE || IV || ONE, with NONCE 4 bytes,
			 *  IV 8 bytes, ONE 4 bytes; only the 4-byte ONE field acts as
			 *  the counter.
			 *  The kernel instead treats the entire counter block as one
			 *  big-endian counter.
			 */
			over_flag = 0;
			one = *((unsigned int *)(walk.iv + bsize - 4));
			for (i = 0; i < (walk.nbytes / bsize); i++)
			{					
				if (one == 0xffffffff)
				{
					//printk("%s %d i=%d one=%u\n", __FUNCTION__, __LINE__, i, one);
					over_flag = 1;
					break;
				}
				one++;
			}
			if (over_flag)
			{
				/* process the blocks before ONE overflows */
				len = bsize*(i+1);
				nbytes = rtl_cipher_crypt(child, 1,
				&ctx->rtl_ctx, walk.src.virt.addr, len,
				walk.iv, walk.dst.virt.addr);
				//printk("%s %d len=%u nbytes=%u \n", __FUNCTION__, __LINE__, len, nbytes);
				src += (len - nbytes);
				dst += (len - nbytes);
								
				/* after ONE overflows, update the IV */
				memcpy(over_iv, walk.iv, bsize - 4);
				crypto_inc(over_iv, bsize-4);
				memcpy(walk.iv, over_iv, bsize);
				
				nbytes = rtl_cipher_crypt(child, 1,
				&ctx->rtl_ctx, src, walk.nbytes -len,
				walk.iv, dst);
				
				/* increment counter in counterblock */
				for (i = 0; i < ((walk.nbytes -len) / bsize); i++)
					crypto_inc(walk.iv, bsize);
				
				if (walk.src.virt.addr == walk.dst.virt.addr)
				{
					src += ((walk.nbytes -len) - nbytes);
				}
				else
				{
					src += ((walk.nbytes -len) - nbytes);
					dst += ((walk.nbytes -len) - nbytes);
				}
				
			}
			else
			{
				nbytes = rtl_cipher_crypt(child, 1,
					&ctx->rtl_ctx, walk.src.virt.addr, walk.nbytes,
					walk.iv, walk.dst.virt.addr);		
				if (walk.src.virt.addr == walk.dst.virt.addr)
				{
					walk.src.virt.addr += (walk.nbytes - nbytes);
				}
				else
				{
					walk.src.virt.addr += (walk.nbytes - nbytes);
					walk.dst.virt.addr += (walk.nbytes - nbytes);
				}
				/* increment counter in counterblock */
				for (i = 0; i < (walk.nbytes / bsize); i++)
					crypto_inc(walk.iv, bsize);
			}

			err = blkcipher_walk_done(desc, &walk, nbytes);
			continue;
		}
#endif

		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
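The Realtek branch in the last example has to split a request wherever the engine's 32-bit ONE field would overflow, because the hardware does not carry into the upper twelve bytes of the counter block. Below is a standalone sketch of that wrap check, reading the low word explicitly as big-endian (as CTR semantics define it); all names are illustrative, not the vendor API.

/* Sketch: how many blocks fit before a 32-bit hardware counter wraps. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Read the last 4 bytes of the counter block as a big-endian word. */
static uint32_t ctr_low32(const uint8_t *ctr)
{
	return ((uint32_t)ctr[12] << 24) | ((uint32_t)ctr[13] << 16) |
	       ((uint32_t)ctr[14] << 8)  |  (uint32_t)ctr[15];
}

/*
 * Number of blocks such an engine can process before its 32-bit counter
 * word wraps, including the block that hits the all-ones value.
 */
static unsigned int blocks_before_wrap(const uint8_t *ctr,
				       unsigned int blocks)
{
	uint64_t room = 0x100000000ULL - ctr_low32(ctr);

	return (blocks < room) ? blocks : (unsigned int)room;
}

int main(void)
{
	uint8_t ctr[BLOCK_SIZE] = { 0 };

	memset(ctr + 12, 0xff, 4);
	ctr[15] = 0xfd;				/* low word = 0xfffffffd */
	printf("%u\n", blocks_before_wrap(ctr, 8));	/* prints 3 */
	return 0;
}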