/*
 * Lazily allocate the global EVM HMAC transform and return a freshly
 * keyed and initialised shash descriptor for it.
 *
 * Returns the descriptor (caller kfree()s it) or an ERR_PTR() on failure.
 *
 * NOTE(review): the lazy initialisation of the global hmac_tfm is not
 * protected by any lock here; two concurrent first callers could both
 * allocate — confirm callers serialise, or guard this with a mutex.
 */
static struct shash_desc *init_desc(void)
{
	int rc;
	struct shash_desc *desc;

	if (hmac_tfm == NULL) {
		hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(hmac_tfm)) {
			pr_err("Can not allocate %s (reason: %ld)\n",
			       evm_hmac, PTR_ERR(hmac_tfm));
			rc = PTR_ERR(hmac_tfm);
			/* reset so a later call can retry the allocation */
			hmac_tfm = NULL;
			return ERR_PTR(rc);
		}
	}

	/* descriptor header and algorithm context in one allocation */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = hmac_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* (re)key with the EVM key, then start a fresh hash */
	rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
	if (rc)
		goto out;
	rc = crypto_shash_init(desc);
out:
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
static int mv_hash_final_fallback(struct ahash_request *req) { const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); struct { struct shash_desc shash; char ctx[crypto_shash_descsize(tfm_ctx->fallback)]; } desc; int rc; desc.shash.tfm = tfm_ctx->fallback; desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; if (unlikely(req_ctx->first_hash)) { crypto_shash_init(&desc.shash); crypto_shash_update(&desc.shash, req_ctx->buffer, req_ctx->extra_bytes); } else { /* only SHA1 for now.... */ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash); if (rc) goto out; } rc = crypto_shash_final(&desc.shash, req->result); out: return rc; }
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 *
 * On success cert->sig holds the signature MPI, the digest of the TBS
 * region and its size.  Returns 0 on success, -ENOPKG when the hash
 * algorithm is unavailable, or another negative errno.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (cert->unsupported_crypto)
		return -ENOPKG;
	if (cert->sig.rsa.s)
		return 0;	/* already parsed and digested */

	cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
	if (!cert->sig.rsa.s)
		return -ENOMEM;
	cert->sig.nr_mpi = 1;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			/* remember, so later calls fail fast with -ENOPKG */
			cert->unsupported_crypto = true;
			return -ENOPKG;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of the
	 * digest storage space.
	 */
	ret = -ENOMEM;
	digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
	if (!digest)
		goto error;

	/* ownership of the digest buffer passes to the cert;
	 * presumably the certificate destructor frees it — verify */
	cert->sig.digest = digest;
	cert->sig.digest_size = digest_size;

	/* the shash descriptor lives directly after the digest bytes */
	desc = digest + digest_size;
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	might_sleep();
	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/*
 * Compute the MD5 signature of an SMB request held in an iovec array:
 * MD5(session_key.response || PDU bytes), skipping the 4-byte RFC1001
 * length prefix in the first vector.  The digest is written through
 * *signature; the caller supplies the buffer.
 */
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
				struct TCP_Server_Info *server, char *signature)
{
	int i;
	int rc;

	if (iov == NULL || signature == NULL || server == NULL)
		return -EINVAL;

	if (!server->secmech.sdescmd5) {
		cERROR(1, "%s: Can't generate signature\n", __func__);
		return -1;
	}

	rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5\n", __func__);
		return rc;
	}

	/* the session key is always hashed in first */
	rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
		server->session_key.response, server->session_key.len);
	if (rc) {
		cERROR(1, "%s: Could not update with response\n", __func__);
		return rc;
	}

	for (i = 0; i < n_vec; i++) {
		if (iov[i].iov_len == 0)
			continue;
		if (iov[i].iov_base == NULL) {
			cERROR(1, "null iovec entry");
			return -EIO;
		}
		/* The first entry includes a length field (which does not get
		   signed) that occupies the first 4 bytes before the header */
		if (i == 0) {
			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
				break; /* nothing to sign or corrupt header */
			rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
				iov[i].iov_base + 4, iov[i].iov_len - 4);
		} else {
			rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
				iov[i].iov_base, iov[i].iov_len);
		}
		if (rc) {
			cERROR(1, "%s: Could not update with payload\n",
				__func__);
			return rc;
		}
	}

	rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash\n", __func__);

	return rc;
}
/* * Calculate and return the CIFS signature based on the mac key and SMB PDU. * The 16 byte signature must be allocated by the caller. Note we only use the * 1st eight bytes and that the smb header signature field on input contains * the sequence number before this function is called. Also, this function * should be called with the server->srv_mutex held. */ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, char *signature) { int rc; if (cifs_pdu == NULL || signature == NULL || server == NULL) return -EINVAL; if (!server->secmech.sdescmd5) { cERROR(1, "%s: Can't generate signature\n", __func__); return -1; } rc = crypto_shash_init(&server->secmech.sdescmd5->shash); if (rc) { cERROR(1, "%s: Oould not init md5\n", __func__); return rc; } crypto_shash_update(&server->secmech.sdescmd5->shash, server->session_key.response, server->session_key.len); crypto_shash_update(&server->secmech.sdescmd5->shash, cifs_pdu->Protocol, cifs_pdu->smb_buf_length); rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature); return 0; }
/*
 * Compute the MD5 digest of a symlink target string into md5_hash
 * (a 16-byte buffer supplied by the caller).
 *
 * Fix: this copy of the function was truncated and ended in a leftover
 * "<<<<<<< HEAD" merge-conflict marker; the update/final/cleanup tail has
 * been restored (mirroring the intact version elsewhere in this file) and
 * the marker removed.
 */
static int symlink_hash(unsigned int link_len, const char *link_str,
			u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
		return rc;
	}
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure\n", __func__);
		goto symlink_hash_err;
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash\n", __func__);

symlink_hash_err:
	crypto_free_shash(md5);
	kfree(sdescmd5);
	return rc;
}
static int CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash) { int rc; unsigned int offset = CIFS_SESS_KEY_SIZE + 8; if (!ses->server->secmech.sdeschmacmd5) { cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); return -1; } crypto_shash_setkey(ses->server->secmech.hmacmd5, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE); rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash); if (rc) { cERROR(1, "CalcNTLMv2_response: could not init hmacmd5"); return rc; } if (ses->server->secType == RawNTLMSSP) memcpy(ses->auth_key.response + offset, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE); else memcpy(ses->auth_key.response + offset, ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE); crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, ses->auth_key.response + offset, ses->auth_key.len - offset); rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash, ses->auth_key.response + CIFS_SESS_KEY_SIZE); return rc; }
/*
 * Hash an in-memory buffer with the given shash transform, feeding it in
 * page-sized chunks.  The digest and its length are stored into *hash.
 */
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		unsigned int chunk = (size < PAGE_SIZE) ? size : PAGE_SIZE;

		rc = crypto_shash_update(shash, buf, chunk);
		if (rc)
			break;
		buf += chunk;
		size -= chunk;
	}

	return rc ? rc : crypto_shash_final(shash, hash->digest);
}
/*
 * Hash @len bytes at @data with the AppArmor transform.
 *
 * Returns a freshly allocated digest buffer (caller frees), NULL when no
 * transform is configured, or an ERR_PTR() on failure.
 */
char *aa_calc_hash(void *data, size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	char *digest;
	int err;

	if (!apparmor_tfm)
		return NULL;

	digest = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!digest)
		return ERR_PTR(-ENOMEM);

	desc->tfm = apparmor_tfm;

	/* chain the three steps; stop at the first failure */
	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, (u8 *) data, len);
	if (!err)
		err = crypto_shash_final(desc, digest);

	if (err) {
		kfree(digest);
		return ERR_PTR(err);
	}
	return digest;
}
/*
 * Calculate the boot aggregate hash: a cumulative SHA1 over TPM PCRs 0-7.
 *
 * Fixes: a failed crypto_shash_update() no longer lets the loop keep
 * running (the old code overwrote rc each iteration), and the result of
 * crypto_shash_final() is now propagated instead of being discarded.
 */
int __init ima_calc_boot_aggregate(char *digest)
{
	u8 pcr_i[IMA_DIGEST_SIZE];
	int rc, i;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ima_shash_tfm)];
	} desc;

	desc.shash.tfm = ima_shash_tfm;
	desc.shash.flags = 0;

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE);
		if (rc)
			return rc;
	}

	return crypto_shash_final(&desc.shash, digest);
}
/*
 * Digest the module contents.
 *
 * Allocates a public_key_signature whose trailing storage holds first the
 * shash operational data and then the digest output buffer, hashes the
 * module image into it, and returns it (caller kfree()s) or an ERR_PTR().
 */
static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
						    const void *mod,
						    unsigned long modlen)
{
	struct public_key_signature *pks;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of our
	 * context data and the digest output buffer on the end of that.
	 */
	ret = -ENOMEM;
	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
	if (!pks)
		goto error_no_pks;

	pks->pkey_hash_algo = hash;
	/* layout: [ pks | desc (desc_size) | digest (digest_size) ] */
	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
	pks->digest_size = digest_size;

	desc = (void *)pks + sizeof(*pks);
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = crypto_shash_finup(desc, mod, modlen, pks->digest);
	if (ret < 0)
		goto error;

	crypto_free_shash(tfm);
	pr_devel("<==%s() = ok\n", __func__);
	return pks;

error:
	kfree(pks);
error_no_pks:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ERR_PTR(ret);
}
/*
 * Start a hash by delegating to the software fallback shash, carrying
 * over only the MAY_SLEEP request flag.
 */
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *pdesc = shash_desc_ctx(desc);
	struct padlock_sha_ctx *pctx = crypto_shash_ctx(desc->tfm);

	pdesc->fallback.tfm = pctx->fallback;
	pdesc->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_init(&pdesc->fallback);
}
/*
 * Hash the contents of a file with the given shash transform, reading it
 * in page-sized chunks.  The digest and its length are stored in *hash.
 */
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	/* empty file: digest of zero bytes */
	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	/* temporarily grant ourselves read access; restored below */
	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;	/* unexpected EOF */
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
/*
 * Return an initialised shash descriptor for either the EVM HMAC (keyed)
 * or plain-hash transform, allocating the shared transform on first use.
 *
 * The global transform pointer is initialised under a mutex with a
 * double-checked pattern, so concurrent first callers allocate only once.
 * Returns the descriptor (caller kfree()s it) or an ERR_PTR().
 */
static struct shash_desc *init_desc(char type)
{
	long rc;
	char *algo;
	struct crypto_shash **tfm;
	struct shash_desc *desc;

	if (type == EVM_XATTR_HMAC) {
		tfm = &hmac_tfm;
		algo = evm_hmac;
	} else {
		tfm = &hash_tfm;
		algo = evm_hash;
	}

	if (*tfm == NULL) {
		mutex_lock(&mutex);
		/* re-check under the lock: someone may have beaten us */
		if (*tfm)
			goto out;
		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*tfm)) {
			rc = PTR_ERR(*tfm);
			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
			*tfm = NULL;
			mutex_unlock(&mutex);
			return ERR_PTR(rc);
		}
		/* the HMAC transform is keyed once, at allocation time */
		if (type == EVM_XATTR_HMAC) {
			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
			if (rc) {
				crypto_free_shash(*tfm);
				*tfm = NULL;
				mutex_unlock(&mutex);
				return ERR_PTR(rc);
			}
		}
out:
		mutex_unlock(&mutex);
	}

	/* descriptor header and algorithm context in one allocation */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = *tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_init(desc);
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
/*
 * Implementation of the KDF in counter mode according to SP800-108 section 5.1
 * as well as SP800-56A section 5.8.1 (Single-step KDF).
 *
 * SP800-56A:
 * The src pointer is defined as Z || other info where Z is the shared secret
 * from DH and other info is an arbitrary string (see SP800-56A section
 * 5.8.1.2).
 *
 * 'dlen' must be a multiple of the digest size.
 */
static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
		   u8 *dst, unsigned int dlen, unsigned int zlen)
{
	struct shash_desc *desc = &sdesc->shash;
	unsigned int h = crypto_shash_digestsize(desc->tfm);
	int err = 0;
	u8 *dst_orig = dst;
	__be32 counter = cpu_to_be32(1);

	/* each round produces h bytes: hash(counter || [zero-pad] || src) */
	while (dlen) {
		err = crypto_shash_init(desc);
		if (err)
			goto err;

		/* big-endian 32-bit round counter comes first */
		err = crypto_shash_update(desc, (u8 *)&counter,
					  sizeof(__be32));
		if (err)
			goto err;

		/* zlen zero bytes stand in for the not-yet-known shared
		 * secret Z, fed through a small stack buffer */
		if (zlen && h) {
			u8 tmpbuffer[32];
			size_t chunk = min_t(size_t, zlen, sizeof(tmpbuffer));
			memset(tmpbuffer, 0, chunk);

			do {
				err = crypto_shash_update(desc, tmpbuffer,
							  chunk);
				if (err)
					goto err;

				zlen -= chunk;
				chunk = min_t(size_t, zlen, sizeof(tmpbuffer));
			} while (zlen);
		}

		if (src && slen) {
			err = crypto_shash_update(desc, src, slen);
			if (err)
				goto err;
		}

		/* one digest-sized block straight into the output */
		err = crypto_shash_final(desc, dst);
		if (err)
			goto err;

		dlen -= h;
		dst += h;
		counter = cpu_to_be32(be32_to_cpu(counter) + 1);
	}

	return 0;

err:
	/* don't leak a partial key on failure */
	memzero_explicit(dst_orig, dlen);
	return err;
}
static int p8_ghash_init(struct shash_desc *desc) { struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); dctx->fallback_desc.tfm = ctx->fallback; dctx->fallback_desc.flags = desc->flags; return crypto_shash_init(&dctx->fallback_desc); }
/*
 * Compute one LMK IV: an unpadded MD5 over (optional seed || bytes 16..511
 * of the sector data || an encoding of the 56-bit sector number), exported
 * as raw MD5 state words rather than a finalised digest.
 */
static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	/* 4024 is a fixed constant of the LMK scheme — presumably for
	 * loop-AES compatibility; do not change */
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	/* emit the internal state words little-endian as the IV */
	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}
/*
 * Calculate the MD5/SHA1 file digest
 *
 * Reads the whole file in page-sized chunks through the global IMA
 * transform and writes the digest into *digest.
 */
int ima_calc_file_hash(struct file *file, char *digest)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ima_shash_tfm)];
	} desc;

	desc.shash.tfm = ima_shash_tfm;
	desc.shash.flags = 0;

	rc = crypto_shash_init(&desc.shash);
	if (rc != 0)
		return rc;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf) {
		rc = -ENOMEM;
		goto out;
	}
	/* temporarily grant ourselves read access; restored below */
	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}
	i_size = i_size_read(file->f_dentry->d_inode);
	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;	/* unexpected EOF */
		offset += rbuf_len;

		rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
	if (!rc)
		rc = crypto_shash_final(&desc.shash, digest);
	if (read)
		file->f_mode &= ~FMODE_READ;
out:
	return rc;
}
/*
 * calculate authorization info fields to send to TPM
 *
 * Hashes a variable list of (length, data) pairs — terminated by a zero
 * length — into a parameter digest, then HMACs that digest together with
 * the two nonces h1/h2 and the continue-flag byte h3 into *digest.
 */
static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
			unsigned int keylen, unsigned char *h1,
			unsigned char *h2, unsigned char h3, ...)
{
	unsigned char paramdigest[SHA1_DIGEST_SIZE];
	struct sdesc *sdesc;
	unsigned int dlen;
	unsigned char *data;
	unsigned char c;
	int ret;
	va_list argp;

	sdesc = init_sdesc(hashalg);
	if (IS_ERR(sdesc)) {
		pr_info("trusted_key: can't alloc %s\n", hash_alg);
		return PTR_ERR(sdesc);
	}

	/* the continue flag is HMAC'd as a single byte below */
	c = h3;
	ret = crypto_shash_init(&sdesc->shash);
	if (ret < 0)
		goto out;

	/* varargs: (unsigned int len, unsigned char *data) pairs,
	 * terminated by len == 0 */
	va_start(argp, h3);
	for (;;) {
		dlen = va_arg(argp, unsigned int);
		if (dlen == 0)
			break;
		data = va_arg(argp, unsigned char *);
		if (!data) {
			ret = -EINVAL;
			break;
		}
		ret = crypto_shash_update(&sdesc->shash, data, dlen);
		if (ret < 0)
			break;
	}
	va_end(argp);
	if (!ret)
		ret = crypto_shash_final(&sdesc->shash, paramdigest);
	if (!ret)
		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
				  paramdigest, TPM_NONCE_SIZE, h1,
				  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
	kfree(sdesc);
	return ret;
}
/*
 * Calculate the hash of template data
 *
 * For non-"ima" templates each field's length (optionally converted to
 * little-endian for canonical output) is hashed before its data; for the
 * legacy "ima" template the event name field is padded to a fixed width
 * instead.  The digest and its length end up in *hash.
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			/* length-prefix each field for non-legacy templates */
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			/* legacy "ima" template: fixed-width event name */
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}
/*
 * Produce the NTLMv2 response: HMAC-MD5 (keyed with the NTLMv2 hash) over
 * the server/NTLMSSP challenge plus the client blob already staged in
 * ses->auth_key.response.  The 16-byte result lands at
 * auth_key.response + CIFS_SESS_KEY_SIZE.
 */
static int CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
	int rc;
	unsigned int offset = CIFS_SESS_KEY_SIZE + 8;

	if (!ses->server->secmech.sdeschmacmd5) {
		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
		return -1;
	}

	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5,
				 ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
			 __func__);
		return rc;
	}

	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
	if (rc) {
		cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
		return rc;
	}

	/* the challenge comes from NTLMSSP negotiation or the server itself */
	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
		memcpy(ses->auth_key.response + offset,
		       ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
	else
		memcpy(ses->auth_key.response + offset,
		       ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);

	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
				 ses->auth_key.response + offset,
				 ses->auth_key.len - offset);
	if (rc) {
		cifs_dbg(VFS, "%s: Could not update with response\n",
			 __func__);
		return rc;
	}

	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
				ses->auth_key.response + CIFS_SESS_KEY_SIZE);
	if (rc)
		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);

	return rc;
}
/*
 * Compute the Michael MIC over a pseudo-header (DA, SA, priority, three
 * zero pad bytes) followed by the payload, keyed with @key.  The MIC is
 * written through @mic; the stack descriptor is wiped before returning.
 */
int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
		u8 *da, u8 *sa, u8 priority,
		u8 *data, size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm_michael);
	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
	int err;

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Copy header into buffer. We need the padding on the end zeroed */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	desc->tfm = tfm_michael;
	desc->flags = 0;

	/* chain the steps; the first failure short-circuits the rest */
	err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
	if (!err)
		err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, hdr, sizeof(hdr));
	if (!err)
		err = crypto_shash_update(desc, data, data_len);
	if (!err)
		err = crypto_shash_final(desc, mic);

	shash_desc_zero(desc);
	return err;
}
/*
 * Compute the MD5 digest of a symlink target string into md5_hash
 * (a 16-byte buffer supplied by the caller).
 */
static int symlink_hash(unsigned int link_len, const char *link_str,
			u8 *md5_hash)
{
	struct crypto_shash *md5;
	struct sdesc *sdesc;
	unsigned int desc_size;
	int rc;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d", __func__, rc);
		return rc;
	}

	/* descriptor header plus the transform's context area */
	desc_size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdesc = kmalloc(desc_size, GFP_KERNEL);
	if (!sdesc) {
		cERROR(1, "%s: Memory allocation failure", __func__);
		rc = -ENOMEM;
		goto out;
	}
	sdesc->shash.tfm = md5;
	sdesc->shash.flags = 0x0;

	rc = crypto_shash_init(&sdesc->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash", __func__);
		goto out;
	}

	rc = crypto_shash_update(&sdesc->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str", __func__);
		goto out;
	}

	rc = crypto_shash_final(&sdesc->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash", __func__);

out:
	crypto_free_shash(md5);
	kfree(sdesc);
	return rc;
}
/*
 * HMAC a variable list of (length, data) pairs — terminated by a zero
 * length — with @key, writing the result into *digest.
 */
static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
		       unsigned int keylen, ...)
{
	struct sdesc *sdesc;
	va_list argp;
	unsigned int dlen;
	unsigned char *data;
	int ret;

	sdesc = init_sdesc(hmacalg);
	if (IS_ERR(sdesc)) {
		pr_info("trusted_key: can't alloc %s\n", hmac_alg);
		return PTR_ERR(sdesc);
	}

	ret = crypto_shash_setkey(hmacalg, key, keylen);
	if (ret < 0)
		goto out;
	ret = crypto_shash_init(&sdesc->shash);
	if (ret < 0)
		goto out;

	/* varargs: (unsigned int len, unsigned char *data) pairs,
	 * terminated by len == 0 */
	va_start(argp, keylen);
	for (;;) {
		dlen = va_arg(argp, unsigned int);
		if (dlen == 0)
			break;
		data = va_arg(argp, unsigned char *);
		if (data == NULL) {
			ret = -EINVAL;
			break;
		}
		ret = crypto_shash_update(&sdesc->shash, data, dlen);
		if (ret < 0)
			break;
	}
	va_end(argp);
	if (!ret)
		ret = crypto_shash_final(&sdesc->shash, digest);
out:
	kfree(sdesc);
	return ret;
}
/**
 * RFC 6056, Algorithm 3.
 *
 * Just to clarify: Because our port pool is a somewhat complex data structure
 * (rather than a simple range), ephemerals are now handled by pool4. This
 * function has been stripped now to only consist of F(). (Hence the name.)
 */
int rfc6056_f(const struct tuple *tuple6, __u8 fields, unsigned int *result)
{
	union {
		__be32 as32[4];
		__u8 as8[16];
	} md5_result;
	struct shash_desc *desc;
	int error;

	/* descriptor header plus the MD5 transform's context area */
	desc = __wkmalloc("shash desc", sizeof(struct shash_desc)
			+ crypto_shash_descsize(shash), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;
	desc->tfm = shash;
	desc->flags = 0;

	error = crypto_shash_init(desc);
	if (error) {
		log_debug("crypto_hash_init() failed. Errcode: %d", error);
		goto end;
	}

	error = hash_tuple(desc, fields, tuple6);
	if (error) {
		log_debug("crypto_hash_update() failed. Errcode: %d", error);
		goto end;
	}

	error = crypto_shash_final(desc, md5_result.as8);
	if (error) {
		log_debug("crypto_hash_digest() failed. Errcode: %d", error);
		goto end;
	}

	/* F() is the last 32 bits of the MD5 */
	*result = (__force __u32)md5_result.as32[3];
	/* Fall through. */

end:
	__wkfree("shash desc", desc);
	return error;
}
/*
 * Apply TCW (TrueCrypt-compatible) whitening to one 512-byte sector:
 * mix the sector number into the stored whitening value, reduce it with
 * per-32-bit CRC32 passes, then XOR the resulting 8 bytes over the data.
 *
 * Fix: the two "(u8 *)&sector" casts had been corrupted by an
 * HTML-entity mangling ("&sect;" -> "§"), which made the function fail
 * to compile; the intended token is restored.
 */
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq, u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
	} sdesc;
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	sdesc.desc.tfm = tcw->crc32_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(&sdesc.desc);
		if (r)
			goto out;
		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	/* buf held key-derived material; wipe it */
	memzero_explicit(buf, sizeof(buf));
	return r;
}
static int ghash_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!irq_fpu_usable()) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_init(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; desc->flags = req->base.flags; return crypto_shash_init(desc); } }
/*
 * Hash an AppArmor profile blob (prefixed by its little-endian policy
 * version) into a freshly allocated profile->hash.
 *
 * Returns 0 (including when policy hashing is disabled or no transform
 * is configured) or a negative error, in which case profile->hash stays
 * NULL.
 */
int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
			 size_t len)
{
	SHASH_DESC_ON_STACK(desc, apparmor_tfm);
	__le32 le_ver = cpu_to_le32(version);
	int err;

	if (!aa_g_hash_policy)
		return 0;
	if (!apparmor_tfm)
		return 0;

	profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
	if (!profile->hash)
		return -ENOMEM;

	desc->tfm = apparmor_tfm;

	/* chain the steps; stop at the first failure */
	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, (u8 *) &le_ver, 4);
	if (!err)
		err = crypto_shash_update(desc, (u8 *) start, len);
	if (!err)
		err = crypto_shash_final(desc, profile->hash);

	if (err) {
		kfree(profile->hash);
		profile->hash = NULL;
		return err;
	}
	return 0;
}
/* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); return PTR_ERR(md4); } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; cERROR(1, "%s: Memory allocation failure\n", __func__); goto mdfour_err; } sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cERROR(1, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } crypto_shash_update(&sdescmd4->shash, link_str, link_len); rc = crypto_shash_final(&sdescmd4->shash, md4_hash); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; }
/*
 * Calculate the boot aggregate hash: a cumulative digest over TPM PCRs
 * 0-7 using the supplied transform.
 *
 * Fixes: a failed crypto_shash_update() no longer lets the loop keep
 * running (the old code overwrote rc each iteration), and the result of
 * crypto_shash_final() is now propagated instead of being discarded.
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
		if (rc)
			return rc;
	}

	return crypto_shash_final(shash, digest);
}