Example #1
static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}
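
Most of the exit_tfm callbacks collected here free a child transform that a matching init callback allocated earlier. A minimal sketch of such an init counterpart (illustrative function and algorithm names, not the actual pcrypt source):

static int example_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *child;

	/* Allocate the child transform that the exit callback above frees. */
	child = crypto_alloc_aead("gcm(aes)", 0, 0);	/* placeholder algorithm */
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;
	return 0;
}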
Example #2
static void chksum_final(struct crypto_tfm *tfm, u8 *out)
{
	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
	
	*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
Example #3
static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
	crypto_free_cipher(ctx->child);
}
Example #4
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}
Example #5
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
			cryptfn_t fn, int enc, void *info, int in_place)
{
	fn(crypto_tfm_ctx(tfm), dst, src);
}
Example #6
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);

	crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
}
Example #7
static void cprng_exit(struct crypto_tfm *tfm)
{
	free_prng_context(crypto_tfm_ctx(tfm));
}
Example #8
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}
Example #9
static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->child);
}
Example #10
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->fallback);
}
Example #11
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
Example #12
/* key blob: MPI-encoded n, e, d, p, q, dmp1, dmq1, iqmp, in that order */
static int crypto_rsa_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		              unsigned int key_len)
{
	struct crypto_rsa_ctx *ctx = crypto_tfm_ctx(tfm);
	int l;
	int part = 0;
	BIGNUM *x;
	RSA *rsa = &ctx->crr_rsa;

	cleanup_rsa(ctx);

	while (key_len >= 4) {
		l = ntohl(*((uint32_t*) in_key));

		l += 4;

		if (l > key_len)
			return -1;

		x = BN_mpi2bn(in_key, l, NULL);
		if (!x)
			return -1;

//		printk(KERN_INFO "PART %d\n", part);
		switch (part++) {
		case 0:
			rsa->n = x;
			break;

		case 1:
			rsa->e = x;
			break;

		case 2:
			rsa->d = x;
			break;

		case 3:
			rsa->p = x;
			break;

		case 4:
			rsa->q = x;
			break;

		case 5:
			rsa->dmp1 = x;
			break;

		case 6:
			rsa->dmq1 = x;
			break;

		case 7:
			rsa->iqmp = x;
			break;

		default:
			return -1;
		}

		key_len -= l;
		in_key  += l;
	}

	return 0;
}
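
The parser above consumes a concatenation of OpenSSL MPI-encoded big numbers (a 4-byte big-endian length followed by the magnitude, as consumed by BN_mpi2bn()), in the order n, e, d, p, q, dmp1, dmq1, iqmp. A sketch of a hypothetical userspace helper that appends one component to such a blob, assuming OpenSSL's BN_bn2mpi():

/* Hypothetical helper: append one MPI-encoded bignum to the key blob and
 * return the number of bytes written (4-byte length header + magnitude). */
static int append_mpi(unsigned char *dst, const BIGNUM *bn)
{
	return BN_bn2mpi(bn, dst);
}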
Example #13
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
	return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
				 &tfm->crt_flags);
}
Example #14
File: lzo.c Project: 274914765/C
static void lzo_exit(struct crypto_tfm *tfm)
{
	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

	vfree(ctx->lzo_comp_mem);
}
Example #15
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
	AES_decrypt(src, dst, &ctx->dec_key);
}
Example #16
static void init(struct crypto_tfm *tfm)
{
	tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
}
Example #17
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);

	crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
}
Example #18
static void lz4_exit(struct crypto_tfm *tfm)
{
	struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);

	lz4_free_ctx(NULL, ctx->lz4_comp_mem);
}
Example #19
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAST6_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->cast6_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	cast6_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

struct cast6_xts_ctx {
	struct cast6_ctx tweak_ctx;
	struct cast6_ctx crypt_ctx;
};

static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
			      flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__cast6_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__cast6_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static struct crypto_alg cast6_algs[10] = { {
	.cra_name		= "__ecb-cast6-avx",
	.cra_driver_name	= "__driver-ecb-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-cast6-avx",
	.cra_driver_name	= "__driver-cbc-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-cast6-avx",
	.cra_driver_name	= "__driver-ctr-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct cast6_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE,
			.ivsize		= CAST6_BLOCK_SIZE,
			.setkey		= cast6_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-cast6-avx",
	.cra_driver_name	= "__driver-lrw-cast6-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAST6_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct cast6_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAST6_MIN_KEY_SIZE +
					  CAST6_BLOCK_SIZE,
			.max_keysize	= CAST6_MAX_KEY_SIZE +
					  CAST6_BLOCK_SIZE,
			.ivsize		= CAST6_BLOCK_SIZE,
			.setkey		= lrw_cast6_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-cast6-avx",
Example #20
static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();
	return err;
}

static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst, struct scatterlist *src,
			      unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();
	return err;
}

static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
	crypto_free_blkcipher(ctx->child);
}

static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("fpu", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = alg->cra_flags;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
	inst->alg.cra_init = crypto_fpu_init_tfm;
	inst->alg.cra_exit = crypto_fpu_exit_tfm;
	inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void crypto_fpu_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
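
crypto_fpu_alloc() and crypto_fpu_free() are the instance hooks of a crypto template. A minimal sketch of how such a template is typically registered with the legacy template API (field names per that API; treat the sketch as illustrative rather than the exact module code):

static struct crypto_template crypto_fpu_tmpl = {
	.name = "fpu",
	.alloc = crypto_fpu_alloc,
	.free = crypto_fpu_free,
	.module = THIS_MODULE,
};

static int __init crypto_fpu_module_init(void)
{
	return crypto_register_template(&crypto_fpu_tmpl);
}

static void __exit crypto_fpu_module_exit(void)
{
	crypto_unregister_template(&crypto_fpu_tmpl);
}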
Example #21
static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES_EXPKEY_WORDS];
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (cc_is_hw_key(tfm)) {
		/* setting HW key slots */
		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;

		if (ctx_p->flow_mode != S_DIN_to_AES) {
			dev_err(dev, "HW key not supported for non-AES flows\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki->hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki->hw_key1 == hki->hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki->hw_key1, hki->hw_key2);
				return -EINVAL;
			}
			ctx_p->hw.key2_slot =
				hw_key_to_cc_hw_key(hki->hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki->hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->keylen = keylen;
		dev_dbg(dev, "cc_is_hw_key ret 0");

		return 0;
	}

	// verify weak keys
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (!des_ekey(tmp, key) &&
		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}
	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}
	if (ctx_p->flow_mode == S_DIN_to_DES &&
	    keylen == DES3_EDE_KEY_SIZE &&
	    cc_verify_3des_keys(key, keylen)) {
		dev_dbg(dev, "weak 3DES key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
Example #22
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
Example #23
static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}
Example #24
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
Example #25
static void chksum_init(struct crypto_tfm *tfm)
{
	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->crc = mctx->key;
}
Example #26
static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
Example #27
/* Perform the key setup. */
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{

	struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	int i, j, k;

	/* Temporaries for CALC_K. */
	u32 x, y;

	/* The S vector used to key the S-boxes, split up into individual bytes.
	 * 128-bit keys use only sa through sh; 256-bit use all of them. */
	u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
	u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;

	/* Temporary for CALC_S. */
	u8 tmp;

	/* Check key length. */
	if (key_len % 8)
	{
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL; /* unsupported key length */
	}

	/* Compute the first two words of the S vector.  The magic numbers are
	 * the entries of the RS matrix, preprocessed through poly_to_exp. The
	 * numbers in the comments are the original (polynomial form) matrix
	 * entries. */
	CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
	CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
	CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
	CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
	CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
	CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
	CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
	CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
	CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
	CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
	CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
	CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
	CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
	CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
	CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
	CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */

	if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
		/* Calculate the third word of the S vector */
		CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
		CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
		CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
		CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
		CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
		CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
		CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
		CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
	}

	if (key_len == 32) { /* 256-bit key */
		/* Calculate the fourth word of the S vector */
		CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
		CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
		CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
		CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
		CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
		CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
		CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
		CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */

		/* Compute the S-boxes. */
		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
			CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
		}

		/* Calculate whitening and round subkeys.  The constants are
		 * indices of subkeys, preprocessed through q0 and q1. */
		CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
		CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
		CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
		CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
		CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
		CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
		CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
		CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
		CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
		CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
		CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
		CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
		CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
		CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
		CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
		CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
		CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
		CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
		CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
		CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
	} else if (key_len == 24) { /* 192-bit key */
		/* Compute the S-boxes. */
		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
			CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
		}

		/* Calculate whitening and round subkeys.  The constants are
		 * indices of subkeys, preprocessed through q0 and q1. */
		CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
		CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
		CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
		CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
		CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
		CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
		CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
		CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
		CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
		CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
		CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
		CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
		CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
		CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
		CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
		CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
		CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
		CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
		CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
		CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
	} else { /* 128-bit key */
		/* Compute the S-boxes. */
		for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
			CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
		}

		/* Calculate whitening and round subkeys.  The constants are
		 * indices of subkeys, preprocessed through q0 and q1. */
		CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
		CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
		CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
		CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
		CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
		CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
		CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
		CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
		CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
		CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
		CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
		CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
		CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
		CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
		CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
		CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
		CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
		CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
		CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
		CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
	}

	return 0;
}
Example #28
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};


	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		sizeof(struct crypto_report_blkcipher), &rblkcipher);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
Example #29
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};


	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);

	return 0;
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);

	return crypto_init_skcipher_ops_ablkcipher(tfm);
}

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
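
A short usage sketch for the allocator above, inside a hypothetical caller (error handling trimmed):

	struct crypto_skcipher *tfm;

	/* Ask the crypto API for any provider of cbc(aes). */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_skcipher_setkey(), skcipher_request_alloc(), etc. ... */

	crypto_free_skcipher(tfm);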
Example #30
static int crypt(struct blkcipher_desc *d,
		 struct blkcipher_walk *w, struct priv *ctx,
		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int err;
	unsigned int avail;
	const int bs = crypto_cipher_blocksize(ctx->child);
	struct sinfo s = {
		.tfm = crypto_cipher_tfm(ctx->child),
		.fn = fn
	};
	be128 *iv;
	u8 *wsrc;
	u8 *wdst;

	err = blkcipher_walk_virt(d, w);
	if (!(avail = w->nbytes))
		return err;

	wsrc = w->src.virt.addr;
	wdst = w->dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)w->iv;
	s.t = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&s.t, ctx->table);

	goto first;

	for (;;) {
		do {
			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
			inc(iv);

first:
			lrw_round(&s, wdst, wsrc);

			wsrc += bs;
			wdst += bs;
		} while ((avail -= bs) >= bs);

		err = blkcipher_walk_done(d, w, avail);
		if (!(avail = w->nbytes))
			break;

		wsrc = w->src.virt.addr;
		wdst = w->dst.virt.addr;
	}

	return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		   struct scatterlist *src, unsigned int nbytes)
{
	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk w;

	blkcipher_walk_init(&w, dst, src, nbytes);
	return crypt(desc, &w, ctx,
		     crypto_cipher_alg(ctx->child)->cia_decrypt);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_cipher *cipher;
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct priv *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	if (crypto_cipher_blocksize(cipher) != 16) {
		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	ctx->child = cipher;
	return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct priv *ctx = crypto_tfm_ctx(tfm);
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_cipher(ctx->child);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("lrw", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;

	if (alg->cra_alignmask < 7)
		inst->alg.cra_alignmask = 7;
	else
		inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	if (!(alg->cra_blocksize % 4))
		inst->alg.cra_alignmask |= 3;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize =
		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
	inst->alg.cra_blkcipher.max_keysize =
		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

	inst->alg.cra_ctxsize = sizeof(struct priv);

	inst->alg.cra_init = init_tfm;
	inst->alg.cra_exit = exit_tfm;

	inst->alg.cra_blkcipher.setkey = setkey;
	inst->alg.cra_blkcipher.encrypt = encrypt;
	inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}