static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
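For orientation, here is a small user-space sketch (not kernel API) of the per-step length computation above: the step is clamped to the remaining total and to the bytes left before the next page boundary on both scatterlists, and a bounce-buffer slow path is taken when fewer than one block remains contiguous. TOY_PAGE_SIZE and clamp_to_page() are illustrative stand-ins for PAGE_SIZE and scatterwalk_clamp().

#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

/* bytes available before the next page boundary, capped at nbytes */
static unsigned int clamp_to_page(unsigned int offset, unsigned int nbytes)
{
	unsigned int left = TOY_PAGE_SIZE - (offset & (TOY_PAGE_SIZE - 1));

	return nbytes > left ? left : nbytes;
}

int main(void)
{
	unsigned int total = 512, bsize = 16;
	unsigned int src_off = 4090, dst_off = 0;
	unsigned int n = total;

	n = clamp_to_page(src_off, n);	/* only 6 bytes left in the source page */
	n = clamp_to_page(dst_off, n);

	if (n < bsize)
		printf("slow path: bounce-buffer one %u-byte block\n", bsize);
	else
		printf("fast path: process %u bytes in place\n", n);
	return 0;
}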
Example #2
static int
xlr_ctr_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct xlr_crypt_state *op = crypto_blkcipher_ctx(desc->tfm);
	op_handle_t handle = NLMSAE_HANDLE_INITIALIZER;
	Crypto_Operation_pt cop = NULL;
	struct blkcipher_walk walk;
	int err, ret;

	if ((ret = xlr_crypt_op_init(op, &handle, &cop)))
		return ret;

	cop->m = NLM_CTR;
	cop->encrypt = CIPHER_ENCRYPT;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	debug_print(KERN_INFO "in func %s:%d\n", __func__, __LINE__);
	while ((nbytes = walk.nbytes)) {
		cop->iv = op->iv;
		cop->iv_len = op->iv_len;
		cop->nonce = op->nonce;
		cop->input = walk.src.virt.addr;
		cop->input_len = nbytes - (nbytes % op->block_size);
		cop->output = walk.dst.virt.addr;

		memcpy(op->iv, walk.iv, op->iv_len);

		ret = nlmsec_cipher_digest_hmac_cksum(handle, cop);
		if (IS_SUCCESS_SAEOP(ret))
			xlr_inc_enc_stat(cop->c, cop->m, cop->input_len);
		debug_print(KERN_INFO "input_len = %d, retval = %d \n",
			    cop->input_len, ret);

		if (ret == -1) {
			/* release the SAE handle before bailing out */
			nlmsec_op_cleanup(&handle);
			return ret;
		}

		memcpy(walk.iv, cop->output + cop->input_len - op->iv_len,
		       op->iv_len);

		nbytes -= cop->input_len;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	debug_print(KERN_INFO "in func %s:%d\n", __func__, __LINE__);

	nlmsec_op_cleanup(&handle);
	return err;
}
Example #3
static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
                              struct scatterlist *dst, struct scatterlist *src,
                              unsigned int nbytes)
{
    struct blkcipher_walk walk;
    struct crypto_blkcipher *tfm = desc->tfm;
    struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
    struct crypto_cipher *child = ctx->child;
    int err;

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes)) {
#ifdef CONFIG_CRYPTO_DEV_REALTEK
        if (ctx->rtl_ctx.mode >= 0)
        {
            nbytes = rtl_cipher_crypt(child, 0,
                                      &ctx->rtl_ctx, walk.src.virt.addr, nbytes,
                                      walk.iv, walk.dst.virt.addr);

            /* CBC mode update */
            memcpy(walk.iv, walk.dst.virt.addr,
                   crypto_cipher_blocksize(child));

            err = blkcipher_walk_done(desc, &walk, nbytes);
            continue;
        }
#endif
        if (walk.src.virt.addr == walk.dst.virt.addr)
            nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
        else
            nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }

    return err;
}
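The memcpy of the just-consumed ciphertext into walk.iv is what keeps the CBC chain intact across walk chunks. A self-contained toy version of that chaining, with a byte-wise XOR standing in for the real block cipher purely for illustration, looks like this:

#include <stdio.h>
#include <string.h>

#define BS 8

/* toy "block cipher": XOR with the key -- for illustration only */
static void toy_crypt(const unsigned char *key, unsigned char *out,
		      const unsigned char *in)
{
	int i;

	for (i = 0; i < BS; i++)
		out[i] = in[i] ^ key[i];
}

int main(void)
{
	unsigned char key[BS] = "01234567";
	unsigned char iv[BS]  = "ABCDEFGH";
	unsigned char pt[2 * BS] = "hello, cbc mode!";
	unsigned char ct[2 * BS], out[2 * BS], chain[BS], tmp[BS];
	int b, i;

	/* CBC encrypt */
	memcpy(chain, iv, BS);
	for (b = 0; b < 2; b++) {
		for (i = 0; i < BS; i++)
			tmp[i] = pt[b * BS + i] ^ chain[i];
		toy_crypt(key, ct + b * BS, tmp);
		memcpy(chain, ct + b * BS, BS);
	}

	/* CBC decrypt: the previous ciphertext block is the next IV,
	 * which is what the memcpy(walk.iv, ...) above maintains */
	memcpy(chain, iv, BS);
	for (b = 0; b < 2; b++) {
		toy_crypt(key, tmp, ct + b * BS);
		for (i = 0; i < BS; i++)
			out[b * BS + i] = tmp[i] ^ chain[i];
		memcpy(chain, ct + b * BS, BS);
	}

	printf("%.16s\n", (char *)out);	/* prints the original plaintext */
	return 0;
}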
Example #4
static int ecb_paes_crypt(struct blkcipher_desc *desc,
			  unsigned long modifier,
			  struct blkcipher_walk *walk)
{
	struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n, k;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
			     walk->dst.virt.addr, walk->src.virt.addr, n);
		if (k)
			ret = blkcipher_walk_done(desc, walk, nbytes - k);
		if (k < n) {
			if (__paes_set_key(ctx) != 0)
				return blkcipher_walk_done(desc, walk, -EIO);
		}
	}
	return ret;
}
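The `n = nbytes & ~(AES_BLOCK_SIZE - 1)` idiom above, which recurs in most of these walkers, simply rounds the chunk down to a whole number of blocks; the remainder is handed back to blkcipher_walk_done() for the next iteration. A minimal stand-alone demonstration of the arithmetic:

#include <stdio.h>

#define AES_BLOCK_SIZE 16u

int main(void)
{
	unsigned int nbytes = 100;
	unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);	/* 96 complete-block bytes */
	unsigned int tail = nbytes & (AES_BLOCK_SIZE - 1);	/* 4 bytes left over */

	printf("process %u now, hand %u back to blkcipher_walk_done()\n",
	       n, tail);
	return 0;
}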
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src,
			    unsigned int nbytes, int enc)
{
	int ret;
	u8 tweak[AES_BLOCK_SIZE];
	u8 *iv;
	struct blkcipher_walk walk;
	struct p8_aes_xts_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

		blkcipher_walk_init(&walk, dst, src, nbytes);

		ret = blkcipher_walk_virt(desc, &walk);
		iv = walk.iv;
		memset(tweak, 0, AES_BLOCK_SIZE);
		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);

		while ((nbytes = walk.nbytes)) {
			if (enc)
				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
			else
				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}

		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	}
	return ret;
}
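The tweak buffer passed to aes_p8_xts_encrypt/decrypt is first derived by encrypting the IV with the tweak key; between blocks the low-level code multiplies it by x in GF(2^128). The sketch below shows only that doubling step (little-endian convention with the 0x87 reduction from IEEE P1619); it is illustrative and not the p8 assembler interface.

#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* multiply the tweak by x in GF(2^128), little-endian, poly 0x87 */
static void xts_gf_double(unsigned char t[AES_BLOCK_SIZE])
{
	unsigned char carry = 0;
	int i;

	for (i = 0; i < AES_BLOCK_SIZE; i++) {
		unsigned char next = t[i] >> 7;

		t[i] = (unsigned char)((t[i] << 1) | carry);
		carry = next;
	}
	if (carry)
		t[0] ^= 0x87;
}

int main(void)
{
	unsigned char tweak[AES_BLOCK_SIZE] = { 0x80, [15] = 0x80 };
	int i;

	xts_gf_double(tweak);
	for (i = 0; i < AES_BLOCK_SIZE; i++)
		printf("%02x", tweak[i]);
	printf("\n");
	return 0;
}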
Example #6
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
                         struct scatterlist *src, unsigned int nbytes)
{
    u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
    struct blkcipher_walk walk;
    int err;

    if (!may_use_simd())
        return crypto_chacha20_crypt(desc, dst, src, nbytes);

    state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

    crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

    kernel_fpu_begin();

    while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % CHACHA20_BLOCK_SIZE);
    }

    if (walk.nbytes) {
        chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
                        walk.nbytes);
        err = blkcipher_walk_done(desc, &walk, 0);
    }

    kernel_fpu_end();

    return err;
}
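crypto_chacha20_init() fills the 16-word state that chacha20_dosimd() consumes. Below is a stand-alone sketch of that layout, assuming the RFC 7539 arrangement of four constant words, eight key words, a block counter and three nonce words; the function names here are made up for the sketch.

#include <stdint.h>
#include <stdio.h>

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* layout per RFC 7539: constants | key | counter | nonce */
static void chacha20_init_state(uint32_t state[16], const uint8_t key[32],
				const uint8_t nonce[12], uint32_t counter)
{
	int i;

	state[0] = 0x61707865;	/* "expa" */
	state[1] = 0x3320646e;	/* "nd 3" */
	state[2] = 0x79622d32;	/* "2-by" */
	state[3] = 0x6b206574;	/* "te k" */
	for (i = 0; i < 8; i++)
		state[4 + i] = le32(key + 4 * i);
	state[12] = counter;
	for (i = 0; i < 3; i++)
		state[13 + i] = le32(nonce + 4 * i);
}

int main(void)
{
	uint32_t state[16];
	uint8_t key[32] = { 0 }, nonce[12] = { 0 };

	chacha20_init_state(state, key, nonce, 0);
	printf("state[0] = 0x%08x\n", state[0]);
	return 0;
}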
Example #7
static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
			    struct blkcipher_walk *walk)
{
	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[DES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		n = DES_BLOCK_SIZE;
		if (nbytes >= 2*DES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
			    walk->src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
				DES_BLOCK_SIZE);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
			    DES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	return ret;
}
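crypto_inc() treats the IV as a big-endian counter and adds one per block, which is what keeps the keystream advancing between cpacf_kmctr() calls. A user-space equivalent, for illustration only:

#include <stdio.h>

#define DES_BLOCK_SIZE 8

/* big-endian increment of the counter block, like crypto_inc() */
static void ctr_inc(unsigned char *ctr, int size)
{
	int i;

	for (i = size - 1; i >= 0; i--)
		if (++ctr[i])
			break;	/* stop once a byte did not wrap around */
}

int main(void)
{
	unsigned char iv[DES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
	int i;

	ctr_inc(iv, DES_BLOCK_SIZE);
	for (i = 0; i < DES_BLOCK_SIZE; i++)
		printf("%02x", iv[i]);
	printf("\n");	/* 0000000000000100 */
	return 0;
}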
Example #8
static int encrypt(struct blkcipher_desc *desc,
		   struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 64);

	salsa20_ivsetup(ctx, walk.iv);

	if (likely(walk.nbytes == nbytes))
	{
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr, nbytes);
		return blkcipher_walk_done(desc, &walk, 0);
	}

	while (walk.nbytes >= 64) {
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr,
				      walk.nbytes - (walk.nbytes % 64));
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
	}

	if (walk.nbytes) {
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr, walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #9
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists
 */
int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist    *dst,
		      struct scatterlist    *src,
		      unsigned int           nbytes,
		      u8                    *iv)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	struct blkcipher_walk walk;
	int rc;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	if (rc)
		goto out;

	if (iv)
		memcpy(iv, walk.iv, AES_BLOCK_SIZE);

	while (walk.nbytes) {
		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
					   walk.nbytes, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
					    walk.nbytes, nx_ctx->ap->sglen);

		rc = blkcipher_walk_done(desc, &walk, 0);
		if (rc)
			break;
	}

	if (walk.nbytes) {
		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
					   walk.nbytes, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
					    walk.nbytes, nx_ctx->ap->sglen);

		rc = 0;
	}

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
out:
	return rc;
}
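Roughly, nx_build_sg_list() turns each contiguous walk chunk into scatter/gather entries no longer than the hardware limit and returns a pointer just past the last entry written, which is why the final lengths above come from pointer differences. The toy model below captures that shape; struct toy_sg and build_sg_list() are invented for the sketch and are not the NX driver API.

#include <stdint.h>
#include <stdio.h>

/* invented for this sketch -- not the NX driver's struct nx_sg */
struct toy_sg {
	uint64_t addr;
	uint32_t len;
};

static struct toy_sg *build_sg_list(struct toy_sg *sg, uint64_t addr,
				    uint32_t len, uint32_t sgmax)
{
	while (len) {
		uint32_t chunk = len < sgmax ? len : sgmax;

		sg->addr = addr;
		sg->len = chunk;
		sg++;
		addr += chunk;
		len -= chunk;
	}
	return sg;	/* one past the last entry written, like nx_insg */
}

int main(void)
{
	struct toy_sg list[8];
	struct toy_sg *end = build_sg_list(list, 0x1000, 10000, 4096);

	printf("%d entries\n", (int)(end - list));	/* 3 */
	return 0;
}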
Example #10
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
Example #11
static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
			    struct blkcipher_walk *walk)
{
	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(DES_BLOCK_SIZE - 1);
		cpacf_km(fc, ctx->key, walk->dst.virt.addr,
			 walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}
Example #12
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_xts_ctx *xts_ctx,
			 struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	struct pcc_param pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (!nbytes)
		goto out;

	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
	if (ret < 0)
		return -EIO;

	memcpy(xts_param.key, xts_ctx->key, 32);
	memcpy(xts_param.init, pcc_param.xts, 16);
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}
Example #13
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
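The inner loop above batches as many multi-block calls as fit, then falls back to smaller batches, and finally returns any sub-block tail to the walker. A minimal stand-alone model of that accounting, using one fixed group size instead of the gctx->funcs table:

#include <stdio.h>

#define BSIZE 16u

/* consume whole multi-block groups first, then single blocks; the
 * sub-block tail (nbytes - return value) goes back to the walker */
static unsigned int ecb_batch(unsigned int nbytes, unsigned int group)
{
	unsigned int done = 0;

	while (nbytes >= group * BSIZE) {	/* multi-block batch */
		done += group * BSIZE;
		nbytes -= group * BSIZE;
	}
	while (nbytes >= BSIZE) {		/* leftovers, one block at a time */
		done += BSIZE;
		nbytes -= BSIZE;
	}
	return done;
}

int main(void)
{
	printf("%u of 100 bytes processed\n", ecb_batch(100, 4));
	return 0;
}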
Example #14
int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct bf_ctx *, u8 *, const u8 *),
		     void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
	struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = BF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process four block batch */
		if (nbytes >= bsize * 4) {
			do {
				fn_4way(ctx, wdst, wsrc);

				wsrc += bsize * 4;
				wdst += bsize * 4;
				nbytes -= bsize * 4;
			} while (nbytes >= bsize * 4);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
Example #16
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
		     void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				fn_3way(ctx, wdst, wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
static int skcipher_null_crypt(struct blkcipher_desc *desc,
			       struct scatterlist *dst,
			       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		if (walk.src.virt.addr != walk.dst.virt.addr)
			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
			       walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #18
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     const u32 *expkey)
{
	unsigned int bsize = DES3_EDE_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
							       wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
Example #19
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	cast5_fpu_end(fpu_enabled);
	return err;
}
Example #20
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
			 struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		BUG_ON((ret < 0) || (ret != n));

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}
Example #21
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = 6 + ctx->key_length / 4;
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				(u8 *)ctx->key_enc, rounds, blocks, first);
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}
Example #22
static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *src = walk.src.virt.addr;

		if (walk.dst.virt.addr == walk.src.virt.addr) {
			u8 *iv = walk.iv;

			do {
				crypto_xor(src, iv, AES_BLOCK_SIZE);
				AES_encrypt(src, src, &ctx->enc);
				iv = src;
				src += AES_BLOCK_SIZE;
			} while (--blocks);
			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		} else {
			u8 *dst = walk.dst.virt.addr;

			do {
				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
				AES_encrypt(walk.iv, dst, &ctx->enc);
				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
				src += AES_BLOCK_SIZE;
				dst += AES_BLOCK_SIZE;
			} while (--blocks);
		}
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #23
static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk = {};
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes > 0) {
		u8 *wsrc = walk.src.virt.addr;
		u8 *wdst = walk.dst.virt.addr;

		arc4_crypt(ctx, wdst, wsrc, walk.nbytes);

		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
Example #24
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	unsigned int blocks;
	int err;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}
Example #25
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_xts_ctx *xts_ctx,
			 struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	void *param;

	if (!nbytes)
		goto out;

	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
	param = xts_ctx->pcc.key + offset;
	ret = crypt_s390_pcc(func, param);
	BUG_ON(ret < 0);

	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
	param = xts_ctx->key + offset;
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		BUG_ON(ret < 0 || ret != n);

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}
Example #26
static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
			    u8 *key, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, key, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= DES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}
Example #27
static int
xlr_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct xlr_crypt_state *op = crypto_blkcipher_ctx(desc->tfm);
	op_handle_t handle = NLMSAE_HANDLE_INITIALIZER;
	Crypto_Operation_pt cop = NULL;
	struct blkcipher_walk walk;
	int err, ret;

	if ((ret = xlr_crypt_op_init(op, &handle, &cop)))
		return ret;

	cop->m = NLM_ECB;
	cop->encrypt = CIPHER_ENCRYPT;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		cop->iv_len = 0;
		cop->input = walk.src.virt.addr;
		cop->input_len = nbytes - (nbytes % op->block_size);
		cop->output = walk.dst.virt.addr;

		ret = nlmsec_cipher_digest_hmac_cksum(handle, cop);
		if (IS_SUCCESS_SAEOP(ret))
			xlr_inc_enc_stat(cop->c, cop->m, cop->input_len);
		if (ret == -1) {
			/* release the SAE handle before bailing out */
			nlmsec_op_cleanup(&handle);
			return ret;
		}
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	nlmsec_op_cleanup(&handle);

	return err;
}
Example #28
static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
Example #29
static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	/* generate the initial tweak */
	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

	while (walk.nbytes) {
		kernel_neon_begin();
		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}
Example #30
int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}