Example #1
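/* IMA: hash an in-memory buffer in PAGE_SIZE chunks with the supplied shash transform */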
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				struct ima_digest_data *hash,
				struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
Example #2
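/* AppArmor: digest an arbitrary blob with the global apparmor_tfm; returns a freshly allocated hash or an ERR_PTR on failure */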
char *aa_calc_hash(void *data, size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	char *hash = NULL;
	int error = -ENOMEM;

	if (!apparmor_tfm)
		return NULL;

	hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!hash)
		goto fail;

	desc->tfm = apparmor_tfm;

	error = crypto_shash_init(desc);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) data, len);
	if (error)
		goto fail;
	error = crypto_shash_final(desc, hash);
	if (error)
		goto fail;

	return hash;

fail:
	kfree(hash);

	return ERR_PTR(error);
}
Example #3
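/* fscrypt: derive the ESSIV salt as the SHA-256 digest of the key, allocating the shared transform on first use */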
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
	struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

	/* init hash transform on demand */
	if (unlikely(!tfm)) {
		struct crypto_shash *prev_tfm;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm)) {
			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
					    PTR_ERR(tfm));
			return PTR_ERR(tfm);
		}
		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
		if (prev_tfm) {
			crypto_free_shash(tfm);
			tfm = prev_tfm;
		}
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		desc->flags = 0;

		return crypto_shash_digest(desc, key, keysize, salt);
	}
}
Example #4
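/* IMA: hash a file's contents, reading it page by page via integrity_kernel_read() */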
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
Example #5
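/* Compute a CRC32C over a buffer; 'tfm' is a file-scope crc32c shash transform set up elsewhere in the original file */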
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	int err;

	shash->tfm = tfm;
	shash->flags = 0;
	*ctx = crc;

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	return *ctx;
}
Example #6
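/* dm-crypt LMK: MD5 over the optional seed, sector payload and sector number, exported without final padding to form the IV */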
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
Example #7
/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}
Example #8
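/* orinoco: compute the Michael MIC over the DA/SA/priority pseudo-header and the frame payload */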
int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
		u8 *da, u8 *sa, u8 priority,
		u8 *data, size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm_michael);
	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
	int err;

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Copy header into buffer. We need the padding on the end zeroed */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	desc->tfm = tfm_michael;
	desc->flags = 0;

	err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
	if (err)
		return err;

	err = crypto_shash_init(desc);
	if (err)
		return err;

	err = crypto_shash_update(desc, hdr, sizeof(hdr));
	if (err)
		return err;

	err = crypto_shash_update(desc, data, data_len);
	if (err)
		return err;

	err = crypto_shash_final(desc, mic);
	shash_desc_zero(desc);

	return err;
}
Example #9
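/* AppArmor: hash a policy blob, prefixed with its little-endian version, into profile->hash */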
int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
			 size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	int error = -ENOMEM;
	__le32 le32_version = cpu_to_le32(version);

	if (!aa_g_hash_policy)
		return 0;

	if (!apparmor_tfm)
		return 0;

	profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!profile->hash)
		goto fail;

	desc->tfm = apparmor_tfm;

	error = crypto_shash_init(desc);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) &le32_version, 4);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) start, len);
	if (error)
		goto fail;
	error = crypto_shash_final(desc, profile->hash);
	if (error)
		goto fail;

	return 0;

fail:
	kfree(profile->hash);
	profile->hash = NULL;

	return error;
}
Example #10
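/* dm-crypt TCW: derive the per-sector whitening value via CRC32 of the sector-tweaked whitening seed and apply it to the sector data */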
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
Example #11
/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
		if (rc != 0)
			break;
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}
Example #12
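/* QAT: precompute the HMAC ipad/opad partial hash states and store them in the hardware auth setup block */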
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
Example #13
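/* QAT (older variant): same HMAC ipad/opad precomputation, keeping the intermediate state on the stack */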
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memzero_explicit(ipad + digest_size, block_size - digest_size);
		memzero_explicit(opad + digest_size, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
		memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
Example #14
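/* CCREE (staging): set a cipher key; for ESSIV mode the second key half is overwritten with SHA-256 of the first half (excerpt; the tail of the function is not shown) */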
static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES_EXPKEY_WORDS];
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (cc_is_hw_key(tfm)) {
		/* setting HW key slots */
		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;

		if (ctx_p->flow_mode != S_DIN_to_AES) {
			dev_err(dev, "HW key not supported for non-AES flows\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki->hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki->hw_key1 == hki->hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki->hw_key1, hki->hw_key2);
				return -EINVAL;
			}
			ctx_p->hw.key2_slot =
				hw_key_to_cc_hw_key(hki->hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki->hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->keylen = keylen;
		dev_dbg(dev, "cc_is_hw_key ret 0");

		return 0;
	}

	// verify weak keys
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (!des_ekey(tmp, key) &&
		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}
	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}
	if (ctx_p->flow_mode == S_DIN_to_DES &&
	    keylen == DES3_EDE_KEY_SIZE &&
	    cc_verify_3des_keys(key, keylen)) {
		dev_dbg(dev, "weak 3DES key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
Example #15
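/* CCREE: skcipher setkey with DES/XTS weak-key checks; for ESSIV mode the second key half is overwritten with SHA-256 of the first half */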
static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	struct cc_crypto_alg *cc_alg =
			container_of(tfm->__crt_alg, struct cc_crypto_alg,
				     skcipher_alg.base);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx_p->hw_key = false;

	/*
	 * Verify DES weak keys
	 * Note that we're dropping the expanded key since the
	 * HW does the expansion on its own.
	 */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (keylen == DES3_EDE_KEY_SIZE &&
		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
				      DES3_EDE_KEY_SIZE)) {
			dev_dbg(dev, "weak 3DES key");
			return -EINVAL;
		} else if (!des_ekey(tmp, key) &&
			   (crypto_tfm_get_flags(tfm) &
			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}