Example #1
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
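The hand-rolled on-stack descriptor above (a shash_desc followed by a VLA-sized ctx) is an older idiom; later kernels wrap the same layout in the SHASH_DESC_ON_STACK() helper from <crypto/hash.h>, as examples 3, 4 and 10 below do. A minimal sketch of the pattern with that helper; the function name demo_shash_digest and its parameters are hypothetical:

#include <crypto/hash.h>

static int demo_shash_digest(struct crypto_shash *tfm,
			     const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);	/* declares struct shash_desc *desc */
	int err;

	desc->tfm = tfm;		/* the macro does not set the tfm */

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, data, len);
	if (!err)
		err = crypto_shash_final(desc, out);

	shash_desc_zero(desc);		/* scrub hash state off the stack */
	return err;
}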
Example #2
/*
 * Calculate the HMAC value across the set of protected security xattrs.
 *
 * For performance, instead of retrieving the requested xattr again,
 * calculate the HMAC using the xattr value already supplied by the caller.
 * Rather than allocating and freeing a buffer for each xattr, attempt to
 * re-use the previously allocated memory.
 */
static int evm_calc_hmac_or_hash(struct dentry *dentry,
				const char *req_xattr_name,
				const char *req_xattr_value,
				size_t req_xattr_value_len,
				char type, char *digest)
{
	struct inode *inode = dentry->d_inode;
	struct shash_desc *desc;
	char **xattrname;
	size_t xattr_size = 0;
	char *xattr_value = NULL;
	int error;
	int size;

	if (!inode->i_op || !inode->i_op->getxattr)
		return -EOPNOTSUPP;
	desc = init_desc(type);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	error = -ENODATA;
	for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
		if ((req_xattr_name && req_xattr_value)
		    && !strcmp(*xattrname, req_xattr_name)) {
			error = 0;
			crypto_shash_update(desc, (const u8 *)req_xattr_value,
					     req_xattr_value_len);
			continue;
		}
		size = vfs_getxattr_alloc(dentry, *xattrname,
					  &xattr_value, xattr_size, GFP_NOFS);
		if (size == -ENOMEM) {
			error = -ENOMEM;
			goto out;
		}
		if (size < 0)
			continue;

		error = 0;
		xattr_size = size;
		crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
	}
	hmac_add_misc(desc, inode, digest);

out:
	kfree(xattr_value);
	kfree(desc);
	return error;
}
Example #3
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				struct ima_digest_data *hash,
				struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
Example #4
char *aa_calc_hash(void *data, size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	char *hash = NULL;
	int error = -ENOMEM;

	if (!apparmor_tfm)
		return NULL;

	hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!hash)
		goto fail;

	desc->tfm = apparmor_tfm;

	error = crypto_shash_init(desc);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) data, len);
	if (error)
		goto fail;
	error = crypto_shash_final(desc, hash);
	if (error)
		goto fail;

	return hash;

fail:
	kfree(hash);

	return ERR_PTR(error);
}
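Note the mixed error convention above: aa_calc_hash() returns NULL when no hashing transform is configured, but ERR_PTR() on an actual failure. A hypothetical caller therefore has to handle both cases:

	char *hash = aa_calc_hash(blob, blob_len);	/* blob/blob_len are hypothetical */

	if (IS_ERR_OR_NULL(hash))
		return hash ? PTR_ERR(hash) : 0;	/* no tfm: nothing to hash */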
Example #5
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		rc = crypto_shash_init(&desc.shash);
		if (rc)
			goto out;
		rc = crypto_shash_update(&desc.shash, req_ctx->buffer,
					 req_ctx->extra_bytes);
		if (rc)
			goto out;
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}
Example #6
static int
CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
{
	int rc;
	unsigned int offset = CIFS_SESS_KEY_SIZE + 8;

	if (!ses->server->secmech.sdeschmacmd5) {
		cERROR(1, "%s: can't generate ntlmv2 hash\n", __func__);
		return -1;
	}

	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5,
				ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
	if (rc) {
		cERROR(1, "%s: Could not set ntlmv2 hash as a key\n", __func__);
		return rc;
	}

	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
	if (rc) {
		cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
		return rc;
	}

	if (ses->server->secType == RawNTLMSSP)
		memcpy(ses->auth_key.response + offset,
			ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
	else
		memcpy(ses->auth_key.response + offset,
			ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
		ses->auth_key.response + offset, ses->auth_key.len - offset);
	if (rc) {
		cERROR(1, "%s: Could not update with response\n", __func__);
		return rc;
	}

	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
		ses->auth_key.response + CIFS_SESS_KEY_SIZE);

	return rc;
}
Example #7
/*
 * Calculate the boot aggregate hash
 */
int __init ima_calc_boot_aggregate(char *digest)
{
	u8 pcr_i[IMA_DIGEST_SIZE];
	int rc, i;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ima_shash_tfm)];
	} desc;

	desc.shash.tfm = ima_shash_tfm;
	desc.shash.flags = 0;

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE);
		if (rc != 0)
			break;
	}
	if (!rc)
		rc = crypto_shash_final(&desc.shash, digest);
	return rc;
}
Example #8
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
				struct TCP_Server_Info *server, char *signature)
{
	int i;
	int rc;

	if (iov == NULL || signature == NULL || server == NULL)
		return -EINVAL;

	if (!server->secmech.sdescmd5) {
		cERROR(1, "%s: Can't generate signature\n", __func__);
		return -1;
	}

	rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Oould not init md5\n", __func__);
		return rc;
	}

	crypto_shash_update(&server->secmech.sdescmd5->shash,
		server->session_key.response, server->session_key.len);

	for (i = 0; i < n_vec; i++) {
		if (iov[i].iov_len == 0)
			continue;
		if (iov[i].iov_base == NULL) {
			cERROR(1, "null iovec entry");
			return -EIO;
		}
		/* The first entry includes a length field (which does not get
		   signed) that occupies the first 4 bytes before the header */
		if (i == 0) {
			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
				break; /* nothing to sign or corrupt header */
			crypto_shash_update(&server->secmech.sdescmd5->shash,
				iov[i].iov_base + 4, iov[i].iov_len - 4);
		} else
			crypto_shash_update(&server->secmech.sdescmd5->shash,
				iov[i].iov_base, iov[i].iov_len);
	}

	rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);

	return rc;
}
Example #9
static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}
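The descriptor-in-descriptor trick above relies on the init path having pointed dctx->fallback at the software transform. A sketch of a matching init, consistent with this driver's pattern (the padlock_sha_ctx layout holding the fallback tfm is assumed):

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	/* delegate to the fallback, forwarding only the MAY_SLEEP flag */
	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}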
Example #10
/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}
Example #11
int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
		u8 *da, u8 *sa, u8 priority,
		u8 *data, size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm_michael);
	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
	int err;

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Copy header into buffer. We need the padding on the end zeroed */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	desc->tfm = tfm_michael;
	desc->flags = 0;

	err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
	if (err)
		return err;

	err = crypto_shash_init(desc);
	if (err)
		return err;

	err = crypto_shash_update(desc, hdr, sizeof(hdr));
	if (err)
		return err;

	err = crypto_shash_update(desc, data, data_len);
	if (err)
		return err;

	err = crypto_shash_final(desc, mic);
	shash_desc_zero(desc);

	return err;
}
Example #12
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
Example #13
int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
			 size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	int error = -ENOMEM;
	__le32 le32_version = cpu_to_le32(version);

	if (!aa_g_hash_policy)
		return 0;

	if (!apparmor_tfm)
		return 0;

	profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!profile->hash)
		goto fail;

	desc->tfm = apparmor_tfm;

	error = crypto_shash_init(desc);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) &le32_version, 4);
	if (error)
		goto fail;
	error = crypto_shash_update(desc, (u8 *) start, len);
	if (error)
		goto fail;
	error = crypto_shash_final(desc, profile->hash);
	if (error)
		goto fail;

	return 0;

fail:
	kfree(profile->hash);
	profile->hash = NULL;

	return error;
}
Example #14
/*
 * Calculate and return the CIFS signature based on the mac key and SMB PDU.
 * The 16 byte signature must be allocated by the caller. Note that we only
 * use the first eight bytes, and that the smb header signature field on
 * input contains the sequence number before this function is called. Also,
 * this function should be called with the server->srv_mutex held.
 */
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
				struct TCP_Server_Info *server, char *signature)
{
	int rc;

	if (cifs_pdu == NULL || signature == NULL || server == NULL)
		return -EINVAL;

	if (!server->secmech.sdescmd5) {
		cERROR(1, "%s: Can't generate signature\n", __func__);
		return -1;
	}

	rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5\n", __func__);
		return rc;
	}

	rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
		server->session_key.response, server->session_key.len);
	if (rc) {
		cERROR(1, "%s: Could not update with response\n", __func__);
		return rc;
	}

	rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
		cifs_pdu->Protocol, be32_to_cpu(cifs_pdu->smb_buf_length));
	if (rc) {
		cERROR(1, "%s: Could not update with payload\n", __func__);
		return rc;
	}

	rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash\n", __func__);

	return rc;
}
Example #15
/*
 * Calculate the MD5/SHA1 file digest
 */
int ima_calc_file_hash(struct file *file, char *digest)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ima_shash_tfm)];
	} desc;

	desc.shash.tfm = ima_shash_tfm;
	desc.shash.flags = 0;

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf) {
		rc = -ENOMEM;
		goto out;
	}
	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}
	i_size = i_size_read(file->f_dentry->d_inode);
	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
	if (!rc)
		rc = crypto_shash_final(&desc.shash, digest);
	if (read)
		file->f_mode &= ~FMODE_READ;
out:
	return rc;
}
Example #16
/* Protect against 'cutting & pasting' security.evm xattr, include inode
 * specific info.
 *
 * (Additional directory/file metadata needs to be added for more complete
 * protection.)
 */
static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
			  char *digest)
{
	struct h_misc {
		unsigned long ino;
		__u32 generation;
		uid_t uid;
		gid_t gid;
		umode_t mode;
	} hmac_misc;

	memset(&hmac_misc, 0, sizeof(hmac_misc));
	hmac_misc.ino = inode->i_ino;
	hmac_misc.generation = inode->i_generation;
	hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
	hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
	hmac_misc.mode = inode->i_mode;
	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
	if (evm_hmac_version > 1)
		crypto_shash_update(desc, inode->i_sb->s_uuid,
				    sizeof(inode->i_sb->s_uuid));
	crypto_shash_final(desc, digest);
}
Example #17
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	int err;

	shash->tfm = tfm;
	shash->flags = 0;
	*ctx = crc;

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	return *ctx;
}
Example #18
int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
		  char *hmac_val)
{
	struct shash_desc *desc;

	desc = init_desc();
	if (IS_ERR(desc)) {
		printk(KERN_INFO "init_desc failed\n");
		return PTR_ERR(desc);
	}

	crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
	hmac_add_misc(desc, inode, hmac_val);
	kfree(desc);
	return 0;
}
Example #19
/*
 * calculate authorization info fields to send to TPM
 */
static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
			unsigned int keylen, unsigned char *h1,
			unsigned char *h2, unsigned char h3, ...)
{
	unsigned char paramdigest[SHA1_DIGEST_SIZE];
	struct sdesc *sdesc;
	unsigned int dlen;
	unsigned char *data;
	unsigned char c;
	int ret;
	va_list argp;

	sdesc = init_sdesc(hashalg);
	if (IS_ERR(sdesc)) {
		pr_info("trusted_key: can't alloc %s\n", hash_alg);
		return PTR_ERR(sdesc);
	}

	c = h3;
	ret = crypto_shash_init(&sdesc->shash);
	if (ret < 0)
		goto out;
	va_start(argp, h3);
	for (;;) {
		dlen = va_arg(argp, unsigned int);
		if (dlen == 0)
			break;
		data = va_arg(argp, unsigned char *);
		if (!data) {
			ret = -EINVAL;
			break;
		}
		ret = crypto_shash_update(&sdesc->shash, data, dlen);
		if (ret < 0)
			break;
	}
	va_end(argp);
	if (!ret)
		ret = crypto_shash_final(&sdesc->shash, paramdigest);
	if (!ret)
		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
				  paramdigest, TPM_NONCE_SIZE, h1,
				  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
	kfree(sdesc);
	return ret;
}
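The init_sdesc() helper called here and in TSS_rawhmac() below is not shown in these examples; a sketch consistent with how the trusted-keys code builds a heap-allocated descriptor, sized with crypto_shash_descsize():

struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
	struct sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return ERR_PTR(-ENOMEM);
	sdesc->shash.tfm = alg;
	sdesc->shash.flags = 0x0;
	return sdesc;
}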
Example #20
__u16 crc_t10dif(const unsigned char *buffer, size_t len)
{
	struct {
		struct shash_desc shash;
		char ctx[2];
	} desc;
	int err;

	desc.shash.tfm = crct10dif_tfm;
	desc.shash.flags = 0;
	*(__u16 *)desc.ctx = 0;

	err = crypto_shash_update(&desc.shash, buffer, len);
	BUG_ON(err);

	return *(__u16 *)desc.ctx;
}
Example #21
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm)];
	} desc;
	int err;

	desc.shash.tfm = tfm;
	desc.shash.flags = 0;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}
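A note on seeding: the crc32c() wrappers above only fold data into the running CRC passed in as crc. Where a caller wants the standard CRC-32C value of a buffer, the conventional ~0 initial seed and final inversion are its own responsibility; a hypothetical one-shot use:

	u32 csum = ~crc32c(~0U, buf, len);	/* buf/len are hypothetical */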
Example #22
static int
CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
	int rc;
	unsigned int offset = CIFS_SESS_KEY_SIZE + 8;

	if (!ses->server->secmech.sdeschmacmd5) {
		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
		return -1;
	}

	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5,
				ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
			 __func__);
		return rc;
	}

	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
	if (rc) {
		cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
		return rc;
	}

	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
		memcpy(ses->auth_key.response + offset,
			ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
	else
		memcpy(ses->auth_key.response + offset,
			ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
		ses->auth_key.response + offset, ses->auth_key.len - offset);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
		return rc;
	}

	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
		ses->auth_key.response + CIFS_SESS_KEY_SIZE);
	if (rc)
		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);

	return rc;
}
Example #23
static int p8_ghash_update(struct shash_desc *desc,
        const u8 *src, unsigned int srclen)
{
    unsigned int len;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_update(&dctx->fallback_desc, src, srclen);
    } else {
        if (dctx->bytes) {
            if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
                memcpy(dctx->buffer + dctx->bytes, src, srclen);
                dctx->bytes += srclen;
                return 0;
            }
            memcpy(dctx->buffer + dctx->bytes, src,
                    GHASH_DIGEST_SIZE - dctx->bytes);
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            src += GHASH_DIGEST_SIZE - dctx->bytes;
            srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
            dctx->bytes = 0;
        }
        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
        if (len) {
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
            pagefault_enable();
            src += len;
            srclen -= len;
        }
        if (srclen) {
            memcpy(dctx->buffer, src, srclen);
            dctx->bytes = srclen;
        }
        return 0;
    }
}
Example #24
static int
symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
		return rc;
	}
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure\n", __func__);
		goto symlink_hash_err;
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update iwth link_str\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash\n", __func__);

symlink_hash_err:
	crypto_free_shash(md5);
	kfree(sdescmd5);

	return rc;
}
Example #25
static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
		       unsigned int keylen, ...)
{
	struct sdesc *sdesc;
	va_list argp;
	unsigned int dlen;
	unsigned char *data;
	int ret;

	sdesc = init_sdesc(hmacalg);
	if (IS_ERR(sdesc)) {
		pr_info("trusted_key: can't alloc %s\n", hmac_alg);
		return PTR_ERR(sdesc);
	}

	ret = crypto_shash_setkey(hmacalg, key, keylen);
	if (ret < 0)
		goto out;
	ret = crypto_shash_init(&sdesc->shash);
	if (ret < 0)
		goto out;

	va_start(argp, keylen);
	for (;;) {
		dlen = va_arg(argp, unsigned int);
		if (dlen == 0)
			break;
		data = va_arg(argp, unsigned char *);
		if (data == NULL) {
			ret = -EINVAL;
			break;
		}
		ret = crypto_shash_update(&sdesc->shash, data, dlen);
		if (ret < 0)
			break;
	}
	va_end(argp);
	if (!ret)
		ret = crypto_shash_final(&sdesc->shash, digest);
out:
	kfree(sdesc);
	return ret;
}
Example #26
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
	} sdesc;
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	sdesc.desc.tfm = tcw->crc32_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(&sdesc.desc);
		if (r)
			goto out;
		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}
Example #27
/* Protect against 'cutting & pasting' security.evm xattr, include inode
 * specific info.
 *
 * (Additional directory/file metadata needs to be added for more complete
 * protection.)
 */
static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
			  char *digest)
{
	struct h_misc {
		unsigned long ino;
		__u32 generation;
		uid_t uid;
		gid_t gid;
		umode_t mode;
	} hmac_misc;

	memset(&hmac_misc, 0, sizeof hmac_misc);
	hmac_misc.ino = inode->i_ino;
	hmac_misc.generation = inode->i_generation;
	hmac_misc.uid = inode->i_uid;
	hmac_misc.gid = inode->i_gid;
	hmac_misc.mode = inode->i_mode;
	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
	crypto_shash_final(desc, digest);
}
Example #28
/* produce an MD4 message digest from data of length link_len bytes */
int
mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
{
    int rc;
    unsigned int size;
    struct crypto_shash *md4;
    struct sdesc *sdescmd4;

    md4 = crypto_alloc_shash("md4", 0, 0);
    if (IS_ERR(md4)) {
        rc = PTR_ERR(md4);
        cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
        return rc;
    }
    size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
    sdescmd4 = kmalloc(size, GFP_KERNEL);
    if (!sdescmd4) {
        rc = -ENOMEM;
        cERROR(1, "%s: Memory allocation failure\n", __func__);
        goto mdfour_err;
    }
    sdescmd4->shash.tfm = md4;
    sdescmd4->shash.flags = 0x0;

    rc = crypto_shash_init(&sdescmd4->shash);
    if (rc) {
        cERROR(1, "%s: Could not init md4 shash\n", __func__);
        goto mdfour_err;
    }
    rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len);
    if (rc) {
        cERROR(1, "%s: Could not update with link_str\n", __func__);
        goto mdfour_err;
    }
    rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
    if (rc)
        cERROR(1, "%s: Could not generate md4 hash\n", __func__);

mdfour_err:
    crypto_free_shash(md4);
    kfree(sdescmd4);

    return rc;
}
Example #29
/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
		if (rc != 0)
			break;
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}
Example #30
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
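The two init/update/export rounds above are the standard HMAC key schedule: what gets saved is the hash midstate after exactly one block of each padded key, which the hardware later resumes for every message. In comment form (K is the key, zero-padded to the block size):

/*
 * RFC 2104: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * Hashing one block of (K ^ ipad) and exporting the state yields the
 * inner midstate; likewise (K ^ opad) for the outer one. The device
 * then only has to continue from these two states per message.
 */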