/* crypto_tfm exit hook: drop the software shash fallback taken at init. */
static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(sctx->shash_fallback);
	/* Clear the stale pointer so a later use is an obvious NULL deref. */
	sctx->shash_fallback = NULL;
}
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 *
 * Returns 0 on success, -ENOPKG if the hash algorithm is unavailable,
 * -ENOMEM on allocation failure, or a crypto-layer error code.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* A previous call already found the hash algorithm unsupported. */
	if (cert->unsupported_crypto)
		return -ENOPKG;
	/* Signature already extracted - nothing to do. */
	if (cert->sig.rsa.s)
		return 0;

	cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
	if (!cert->sig.rsa.s)
		return -ENOMEM;
	cert->sig.nr_mpi = 1;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			/* Remember the failure so later calls short-circuit. */
			cert->unsupported_crypto = true;
			return -ENOPKG;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of the
	 * digest storage space.
	 */
	ret = -ENOMEM;
	digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
	if (!digest)
		goto error;

	/* The digest buffer is owned by the cert and freed with it. */
	cert->sig.digest = digest;
	cert->sig.digest_size = digest_size;

	desc = digest + digest_size;
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	might_sleep();
	/* Digest the to-be-signed portion of the certificate. */
	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/*
 * Allocate the HMAC and hash transforms used by the trusted-key code.
 * On any failure everything already allocated is released again.
 */
static int __init trusted_shash_alloc(void)
{
	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmacalg)) {
		pr_info("trusted_key: could not allocate crypto %s\n",
			hmac_alg);
		return PTR_ERR(hmacalg);
	}

	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hashalg)) {
		pr_info("trusted_key: could not allocate crypto %s\n",
			hash_alg);
		/* Undo the HMAC allocation before bailing out. */
		crypto_free_shash(hmacalg);
		return PTR_ERR(hashalg);
	}

	return 0;
}
/*
 * crypto_tfm exit hook: release everything the cipher init set up for this
 * context - the ESSIV hash tfm (if any), the DMA mapping of the key buffer,
 * and the key buffer itself.
 */
static void cc_cipher_exit(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Clearing context @%p for %s\n",
		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Free hash tfm for essiv */
		crypto_free_shash(ctx_p->shash_tfm);
		ctx_p->shash_tfm = NULL;
	}

	/* Unmap key buffer - must happen before the buffer is freed. */
	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
			 DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
		&ctx_p->user.key_dma_addr);

	/* Free key buffer in context */
	kfree(ctx_p->user.key);
	/* NOTE(review): this logs the pointer value after kfree(); the
	 * pointer is not dereferenced, only printed. */
	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
}
static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname) { struct crypto_shash *tfm; struct kdf_sdesc *sdesc; int size; int err; /* allocate synchronous hash */ tfm = crypto_alloc_shash(hashname, 0, 0); if (IS_ERR(tfm)) { pr_info("could not allocate digest TFM handle %s\n", hashname); return PTR_ERR(tfm); } err = -EINVAL; if (crypto_shash_digestsize(tfm) == 0) goto out_free_tfm; err = -ENOMEM; size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) goto out_free_tfm; sdesc->shash.tfm = tfm; sdesc->shash.flags = 0x0; *sdesc_ret = sdesc; return 0; out_free_tfm: crypto_free_shash(tfm); return err; }
/*
 * Compute the ESSIV salt as SHA-256(key) into @salt.
 *
 * The SHA-256 transform is allocated lazily on first use and then cached
 * in the global essiv_hash_tfm; losers of the allocation race free their
 * copy and use the winner's.  The cached tfm is never freed here.
 */
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
	struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

	/* init hash transform on demand */
	if (unlikely(!tfm)) {
		struct crypto_shash *prev_tfm;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm)) {
			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
					    PTR_ERR(tfm));
			return PTR_ERR(tfm);
		}
		/* Publish atomically; if someone beat us to it, keep theirs. */
		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
		if (prev_tfm) {
			crypto_free_shash(tfm);
			tfm = prev_tfm;
		}
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;

		return crypto_shash_digest(desc, key, keysize, salt);
	}
}
/* crypto_tfm exit hook: release the generic GHASH fallback, if present. */
static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
{
	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->fallback)
		return;

	crypto_free_shash(ctx->fallback);
	ctx->fallback = NULL;
}
void cifs_crypto_shash_release(struct TCP_Server_Info *server) { if (server->secmech.hmacsha256) crypto_free_shash(server->secmech.hmacsha256); if (server->secmech.md5) crypto_free_shash(server->secmech.md5); if (server->secmech.hmacmd5) crypto_free_shash(server->secmech.hmacmd5); kfree(server->secmech.sdeschmacsha256); kfree(server->secmech.sdeschmacmd5); kfree(server->secmech.sdescmd5); }
/*
 * Allocate the SMB3 signing secmech: the SMB2-level hmac-sha256 pieces
 * (via smb2_crypto_shash_allocate()) plus the cmac(aes) transform and its
 * descriptor.  On any failure everything allocated here - including what
 * smb2_crypto_shash_allocate() set up - is torn down again.
 */
static int smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	unsigned int size;
	int rc;

	if (server->secmech.sdesccmacaes != NULL)
		return 0; /* already allocated */

	/* Sets up hmacsha256 and sdeschmacsha256. */
	rc = smb2_crypto_shash_allocate(server);
	if (rc)
		return rc;

	server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(server->secmech.cmacaes)) {
		cifs_dbg(VFS, "could not allocate crypto cmac-aes");
		/* Undo the SMB2-level allocations before failing. */
		kfree(server->secmech.sdeschmacsha256);
		server->secmech.sdeschmacsha256 = NULL;
		crypto_free_shash(server->secmech.hmacsha256);
		server->secmech.hmacsha256 = NULL;
		rc = PTR_ERR(server->secmech.cmacaes);
		server->secmech.cmacaes = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) +
	       crypto_shash_descsize(server->secmech.cmacaes);
	server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdesccmacaes) {
		cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
		/* Full unwind: SMB2 pieces plus the cmac tfm just taken. */
		kfree(server->secmech.sdeschmacsha256);
		server->secmech.sdeschmacsha256 = NULL;
		crypto_free_shash(server->secmech.hmacsha256);
		crypto_free_shash(server->secmech.cmacaes);
		server->secmech.hmacsha256 = NULL;
		server->secmech.cmacaes = NULL;
		return -ENOMEM;
	}
	server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
	server->secmech.sdesccmacaes->shash.flags = 0x0;

	return 0;
}
/* Free a KDF state container, zeroizing the descriptor memory. */
static void kdf_dealloc(struct kdf_sdesc *sdesc)
{
	struct crypto_shash *tfm;

	if (!sdesc)
		return;

	tfm = sdesc->shash.tfm;
	if (tfm)
		crypto_free_shash(tfm);

	/* kzfree() clears the descriptor (may hold key material). */
	kzfree(sdesc);
}
/*
 * Return a freshly initialised shash descriptor for the EVM hash or HMAC
 * transform, allocating the shared tfm lazily on first use.
 *
 * The tfm is a module-global cached under "mutex" using double-checked
 * locking: the unlocked NULL test is re-checked after taking the mutex.
 * For the HMAC case the EVM key is set on the tfm once at allocation.
 *
 * Returns the descriptor (caller kfree()s it) or an ERR_PTR.
 */
static struct shash_desc *init_desc(char type)
{
	long rc;
	char *algo;
	struct crypto_shash **tfm;
	struct shash_desc *desc;

	/* Pick the cached tfm slot and algorithm name for this xattr type. */
	if (type == EVM_XATTR_HMAC) {
		tfm = &hmac_tfm;
		algo = evm_hmac;
	} else {
		tfm = &hash_tfm;
		algo = evm_hash;
	}

	if (*tfm == NULL) {
		mutex_lock(&mutex);
		/* Re-check under the lock - another task may have won. */
		if (*tfm)
			goto out;
		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*tfm)) {
			rc = PTR_ERR(*tfm);
			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
			*tfm = NULL;
			mutex_unlock(&mutex);
			return ERR_PTR(rc);
		}
		if (type == EVM_XATTR_HMAC) {
			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
			if (rc) {
				crypto_free_shash(*tfm);
				*tfm = NULL;
				mutex_unlock(&mutex);
				return ERR_PTR(rc);
			}
		}
out:
		mutex_unlock(&mutex);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = *tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_init(desc);
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
/* Destructor for the LMK IV generator: drop the hash tfm and the seed. */
static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct crypto_shash *tfm = lmk->hash_tfm;

	/* The ctr may have left an ERR_PTR here - only free a real tfm. */
	if (tfm && !IS_ERR(tfm))
		crypto_free_shash(tfm);
	lmk->hash_tfm = NULL;

	/* Seed may hold key-derived material: zeroize on free. */
	kzfree(lmk->seed);
	lmk->seed = NULL;
}
/* Destructor for the TCW IV generator: seeds, whitening, and crc32 tfm. */
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	struct crypto_shash *tfm = tcw->crc32_tfm;

	/* Key-derived buffers are zeroized on free. */
	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	/* The ctr may have left an ERR_PTR here - only free a real tfm. */
	if (tfm && !IS_ERR(tfm))
		crypto_free_shash(tfm);
	tcw->crc32_tfm = NULL;
}
int btrfs_dedup_cleanup(struct btrfs_fs_info *fs_info) { if (!fs_info->dedup_info) return 0; if (fs_info->dedup_info->backend == BTRFS_DEDUP_BACKEND_INMEMORY) inmem_destroy(fs_info); if (fs_info->dedup_info->dedup_root) { free_root_extent_buffers(fs_info->dedup_info->dedup_root); kfree(fs_info->dedup_info->dedup_root); } crypto_free_shash(fs_info->dedup_info->dedup_driver); kfree(fs_info->dedup_info); fs_info->dedup_info = NULL; return 0; }
/*
 * Disable dedup on a filesystem: tear down the in-memory backend, drop
 * the on-disk dedup root (if any), and free all dedup state.
 * Returns the result of btrfs_drop_snapshot(), or 0.
 */
int btrfs_dedup_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dedup_info *dedup_info = fs_info->dedup_info;
	int ret = 0;

	if (!dedup_info)
		return 0;

	if (dedup_info->backend == BTRFS_DEDUP_BACKEND_INMEMORY)
		inmem_destroy(fs_info);

	if (dedup_info->dedup_root)
		ret = btrfs_drop_snapshot(dedup_info->dedup_root, NULL, 1, 0);

	/* Free the remaining state even if the snapshot drop failed. */
	crypto_free_shash(dedup_info->dedup_driver);
	kfree(dedup_info);
	fs_info->dedup_info = NULL;

	return ret;
}
/*
 * Compute the MD5 hash of a symlink target string into @md5_hash
 * (MD5_DIGEST_SIZE bytes, caller-provided).
 *
 * Fix: corrected the typo "iwth" -> "with" in the update-failure error
 * message (matches the upstream wording).
 *
 * Returns 0 on success or a negative errno.
 */
static int
symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
		return rc;
	}
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure\n", __func__);
		goto symlink_hash_err;
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str\n", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash\n", __func__);

symlink_hash_err:
	crypto_free_shash(md5);
	kfree(sdescmd5);

	return rc;
}
/* free resources for a rxe device all objects created for this device must
 * have been destroyed
 */
void rxe_dealloc(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	/* Tear down every object pool; the ordering mirrors creation. */
	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
	rxe_pool_cleanup(&rxe->srq_pool);
	rxe_pool_cleanup(&rxe->qp_pool);
	rxe_pool_cleanup(&rxe->cq_pool);
	rxe_pool_cleanup(&rxe->mr_pool);
	rxe_pool_cleanup(&rxe->mw_pool);
	rxe_pool_cleanup(&rxe->mc_grp_pool);
	rxe_pool_cleanup(&rxe->mc_elem_pool);

	rxe_cleanup_ports(rxe);

	/* The CRC tfm is allocated lazily, so it may legitimately be NULL. */
	if (rxe->tfm)
		crypto_free_shash(rxe->tfm);
}
/*
 * Hash @src_len bytes of @src into @out using HASH_ALG.
 * Returns 0 on success, -ENOMEM if the descriptor cannot be allocated,
 * or -EFAULT on any crypto failure.
 */
static int calc_hash(const u8 *src, int src_len, u8 *out, struct device *dev)
{
	struct crypto_shash *shash;
	struct sdesc *desc;
	int size;
	int ret;

	shash = crypto_alloc_shash(HASH_ALG, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(shash)) {
		dev_err(dev, "%s: Error. crypto_alloc_shash.\n", __func__);
		return -EFAULT;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
	desc = kmalloc(size, GFP_KERNEL);
	if (!desc) {
		dev_err(dev, "%s: Error. No enough mem for Desc.\n", __func__);
		crypto_free_shash(shash);
		return -ENOMEM;
	}

	desc->shash.tfm = shash;
	desc->shash.flags = 0x00;

	if (crypto_shash_digest(&desc->shash, src, src_len, out)) {
		dev_err(dev, "%s: Error. generate hash.\n", __func__);
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	kfree(desc);
	crypto_free_shash(shash);
	return ret;
}
/* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); return PTR_ERR(md4); } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; cERROR(1, "%s: Memory allocation failure\n", __func__); goto mdfour_err; } sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cERROR(1, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } crypto_shash_update(&sdescmd4->shash, link_str, link_len); rc = crypto_shash_final(&sdescmd4->shash, md4_hash); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; }
/* Module unload: release the cached CRC-T10DIF shash transform. */
static void __exit crc_t10dif_mod_fini(void)
{
	crypto_free_shash(crct10dif_tfm);
}
/*
 * Digest the contents of the PE binary, leaving out the image checksum and the
 * certificate data block.
 *
 * Returns 0 if the computed digest matches ctx->digest, -EKEYREJECTED on a
 * mismatch, -ENOPKG if the hash algorithm is unavailable, -EBADMSG on a
 * digest-size mismatch, or another negative errno.
 */
static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
			    struct pefile_context *ctx)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	kenter(",%s", ctx->digest_algo);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(ctx->digest_algo, 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* The digest recorded in the PKCS#7 blob must match the tfm's size. */
	if (digest_size != ctx->digest_len) {
		pr_debug("Digest size mismatch (%zx != %x)\n",
			 digest_size, ctx->digest_len);
		ret = -EBADMSG;
		goto error_no_desc;
	}
	pr_debug("Digest: desc=%zu size=%zu\n", desc_size, digest_size);

	/* One allocation holds the descriptor followed by the digest buffer. */
	ret = -ENOMEM;
	desc = kzalloc(desc_size + digest_size, GFP_KERNEL);
	if (!desc)
		goto error_no_desc;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = pefile_digest_pe_contents(pebuf, pelen, ctx, desc);
	if (ret < 0)
		goto error;

	digest = (void *)desc + desc_size;
	ret = crypto_shash_final(desc, digest);
	if (ret < 0)
		goto error;
	pr_debug("Digest calc = [%*ph]\n", ctx->digest_len, digest);

	/* Check that the PE file digest matches that in the MSCODE part of the
	 * PKCS#7 certificate.
	 */
	if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
		pr_debug("Digest mismatch\n");
		ret = -EKEYREJECTED;
	} else {
		pr_debug("The digests match!\n");
	}

error:
	kfree(desc);
error_no_desc:
	crypto_free_shash(tfm);
	kleave(" = %d", ret);
	return ret;
}
/* Free an IMA hash tfm - unless it is the shared default, which lives on. */
static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm == ima_shash_tfm)
		return;

	crypto_free_shash(tfm);
}
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 *
 * Unsupported key/sig/hash algorithms are recorded on the cert rather than
 * treated as hard errors, so verification can degrade gracefully.
 * Returns 0 on success (including the "unsupported" cases), or a
 * negative errno.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct public_key_signature *sig = cert->sig;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (!cert->pub->pkey_algo)
		cert->unsupported_key = true;

	if (!sig->pkey_algo)
		cert->unsupported_sig = true;

	/* We check the hash if we can - even if we can't then verify it */
	if (!sig->hash_algo) {
		cert->unsupported_sig = true;
		return 0;
	}

	sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
	if (!sig->s)
		return -ENOMEM;

	sig->s_size = cert->raw_sig_size;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			/* Missing hash module: mark, don't fail hard. */
			cert->unsupported_sig = true;
			return 0;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	sig->digest_size = crypto_shash_digestsize(tfm);

	ret = -ENOMEM;
	sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
	if (!sig->digest)
		goto error;

	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		goto error;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* Digest the to-be-signed portion of the certificate. */
	ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
	if (ret < 0)
		goto error_2;

	/* Reject blacklisted certs, but still report overall success. */
	ret = is_hash_blacklisted(sig->digest, sig->digest_size, "tbs");
	if (ret == -EKEYREJECTED) {
		pr_err("Cert %*phN is blacklisted\n",
		       sig->digest_size, sig->digest);
		cert->blacklisted = true;
		ret = 0;
	}

error_2:
	kfree(desc);
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	/* Recompute HMAC-SHA256(key, challenge) locally for comparison. */
	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
/* Tear down RFC 6056 state: the hash transform and the secret key. */
void rfc6056_teardown(void)
{
	crypto_free_shash(shash);
	__wkfree("Secret key", secret_key);
}
/*
 * Perform the server side of CHAP (MD5) authentication for an iSCSI login.
 *
 * Parses CHAP_N/CHAP_R from @nr_in_ptr, verifies the initiator's digest
 * against MD5(id || password || challenge), and - if mutual authentication
 * is enabled - parses CHAP_I/CHAP_C and writes our own CHAP_N/CHAP_R reply
 * into @nr_out_ptr (length returned via @nr_out_len).
 *
 * Returns 0 on successful authentication, -1 on any failure.
 */
static int chap_server_compute_md5(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	unsigned long id;
	unsigned char id_as_uchar;
	unsigned char digest[MD5_SIGNATURE_SIZE];
	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
	unsigned char identifier[10], *challenge = NULL;
	unsigned char *challenge_binhex = NULL;
	unsigned char client_digest[MD5_SIGNATURE_SIZE];
	unsigned char server_digest[MD5_SIGNATURE_SIZE];
	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
	size_t compare_len;
	struct iscsi_chap *chap = conn->auth_protocol;
	struct crypto_shash *tfm = NULL;
	struct shash_desc *desc = NULL;
	int auth_ret = -1, ret, challenge_len;

	memset(identifier, 0, 10);
	memset(chap_n, 0, MAX_CHAP_N_SIZE);
	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
	memset(digest, 0, MD5_SIGNATURE_SIZE);
	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
	memset(server_digest, 0, MD5_SIGNATURE_SIZE);

	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge) {
		pr_err("Unable to allocate challenge buffer\n");
		goto out;
	}

	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge_binhex) {
		pr_err("Unable to allocate challenge_binhex buffer\n");
		goto out;
	}
	/*
	 * Extract CHAP_N.
	 */
	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
				&type) < 0) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	/* CHAP_N is a name, not a hex-encoded value. */
	if (type == HEX) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}

	/* Include the terminating NULL in the compare */
	compare_len = strlen(auth->userid) + 1;
	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
		pr_err("CHAP_N values do not match!\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
	/*
	 * Extract CHAP_R.
	 */
	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
				&type) < 0) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	/* The response must be hex-encoded. */
	if (type != HEX) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}

	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm)) {
		/* Reset so the cleanup path doesn't free an ERR_PTR. */
		tfm = NULL;
		pr_err("Unable to allocate struct crypto_shash\n");
		goto out;
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		pr_err("Unable to allocate struct shash_desc\n");
		goto out;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	/* server_digest = MD5(id || password || our challenge) */
	ret = crypto_shash_init(desc);
	if (ret < 0) {
		pr_err("crypto_shash_init() failed\n");
		goto out;
	}

	ret = crypto_shash_update(desc, &chap->id, 1);
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for id\n");
		goto out;
	}

	ret = crypto_shash_update(desc, (char *)&auth->password,
				  strlen(auth->password));
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for password\n");
		goto out;
	}

	ret = crypto_shash_finup(desc, chap->challenge,
				 CHAP_CHALLENGE_LENGTH, server_digest);
	if (ret < 0) {
		pr_err("crypto_shash_finup() failed for challenge\n");
		goto out;
	}

	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
	pr_debug("[server] MD5 Server Digest: %s\n", response);

	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
		pr_debug("[server] MD5 Digests do not match!\n\n");
		goto out;
	} else
		pr_debug("[server] MD5 Digests match, CHAP connection"
				" successful.\n\n");
	/*
	 * One way authentication has succeeded, return now if mutual
	 * authentication is not enabled.
	 */
	if (!auth->authenticate_target) {
		auth_ret = 0;
		goto out;
	}
	/*
	 * Get CHAP_I.
	 */
	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
		pr_err("Could not find CHAP_I.\n");
		goto out;
	}

	/* Hex form carries an "0x" prefix to skip. */
	if (type == HEX)
		ret = kstrtoul(&identifier[2], 0, &id);
	else
		ret = kstrtoul(identifier, 0, &id);

	if (ret < 0) {
		pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
		goto out;
	}
	if (id > 255) {
		pr_err("chap identifier: %lu greater than 255\n", id);
		goto out;
	}
	/*
	 * RFC 1994 says Identifier is no more than octet (8 bits).
	 */
	pr_debug("[server] Got CHAP_I=%lu\n", id);
	/*
	 * Get CHAP_C.
	 */
	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
			challenge, &type) < 0) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}

	if (type != HEX) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_C=%s\n", challenge);
	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
				strlen(challenge));
	if (!challenge_len) {
		pr_err("Unable to convert incoming challenge\n");
		goto out;
	}
	if (challenge_len > 1024) {
		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
		goto out;
	}
	/*
	 * During mutual authentication, the CHAP_C generated by the
	 * initiator must not match the original CHAP_C generated by
	 * the target.
	 */
	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
		pr_err("initiator CHAP_C matches target CHAP_C, failing"
		       " login attempt\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R for mutual authentication.
	 */
	ret = crypto_shash_init(desc);
	if (ret < 0) {
		pr_err("crypto_shash_init() failed\n");
		goto out;
	}

	/* To handle both endiannesses */
	id_as_uchar = id;
	ret = crypto_shash_update(desc, &id_as_uchar, 1);
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for id\n");
		goto out;
	}

	ret = crypto_shash_update(desc, auth->password_mutual,
				  strlen(auth->password_mutual));
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for"
				" password_mutual\n");
		goto out;
	}
	/*
	 * Convert received challenge to binary hex.
	 */
	ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
				 digest);
	if (ret < 0) {
		pr_err("crypto_shash_finup() failed for ma challenge\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R.
	 */
	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
	/*
	 * Convert response from binary hex to ascii hext.
	 */
	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
			response);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
	auth_ret = 0;
out:
	/* kzfree/kfree accept NULL, so a single exit path suffices. */
	kzfree(desc);
	if (tfm)
		crypto_free_shash(tfm);
	kfree(challenge);
	kfree(challenge_binhex);
	return auth_ret;
}
/*
 * Check the signature on a certificate using the provided public key
 *
 * Digests the certificate's to-be-signed data, extracts the RSA signature
 * MPI, and hands both to the public-key algorithm's verifier.
 * Returns the verifier's result, -ENOPKG if the hash is unavailable, or a
 * negative errno.
 */
static int x509_check_signature(const struct public_key *pub,
				const struct x509_certificate *cert)
{
	struct public_key_signature *sig;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of our
	 * context data: [sig | desc | digest] in one allocation.
	 */
	ret = -ENOMEM;
	sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL);
	if (!sig)
		goto error_no_sig;

	sig->pkey_hash_algo = cert->sig_hash_algo;
	sig->digest = (u8 *)sig + sizeof(*sig) + desc_size;
	sig->digest_size = digest_size;

	desc = (void *)sig + sizeof(*sig);
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size);
	if (!sig->rsa.s)
		goto error;

	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
	if (ret < 0)
		goto error_mpi;

	ret = pub->algo->verify_signature(pub, sig);

	pr_debug("Cert Verification: %d\n", ret);

error_mpi:
	mpi_free(sig->rsa.s);
error:
	kfree(sig);
error_no_sig:
	crypto_free_shash(tfm);

	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/*
 * Allocate every signing transform the CIFS code may need for this server
 * (hmac(md5), md5, hmac(sha256), cmac(aes)) plus a shash descriptor for
 * each.  On any failure the goto ladder unwinds everything allocated so
 * far, in reverse order, and returns the error.
 */
int
cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	int rc;
	unsigned int size;

	server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
	if (IS_ERR(server->secmech.hmacmd5)) {
		cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
		return PTR_ERR(server->secmech.hmacmd5);
	}

	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(server->secmech.md5)) {
		cifs_dbg(VFS, "could not allocate crypto md5\n");
		rc = PTR_ERR(server->secmech.md5);
		goto crypto_allocate_md5_fail;
	}

	server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(server->secmech.hmacsha256)) {
		cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
		rc = PTR_ERR(server->secmech.hmacsha256);
		goto crypto_allocate_hmacsha256_fail;
	}

	server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(server->secmech.cmacaes)) {
		cifs_dbg(VFS, "could not allocate crypto cmac-aes");
		rc = PTR_ERR(server->secmech.cmacaes);
		goto crypto_allocate_cmacaes_fail;
	}

	/* Each descriptor is sized for its particular transform. */
	size = sizeof(struct shash_desc) +
			crypto_shash_descsize(server->secmech.hmacmd5);
	server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdeschmacmd5) {
		rc = -ENOMEM;
		goto crypto_allocate_hmacmd5_sdesc_fail;
	}
	server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
	server->secmech.sdeschmacmd5->shash.flags = 0x0;

	size = sizeof(struct shash_desc) +
			crypto_shash_descsize(server->secmech.md5);
	server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdescmd5) {
		rc = -ENOMEM;
		goto crypto_allocate_md5_sdesc_fail;
	}
	server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
	server->secmech.sdescmd5->shash.flags = 0x0;

	size = sizeof(struct shash_desc) +
			crypto_shash_descsize(server->secmech.hmacsha256);
	server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdeschmacsha256) {
		rc = -ENOMEM;
		goto crypto_allocate_hmacsha256_sdesc_fail;
	}
	server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
	server->secmech.sdeschmacsha256->shash.flags = 0x0;

	size = sizeof(struct shash_desc) +
			crypto_shash_descsize(server->secmech.cmacaes);
	server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdesccmacaes) {
		cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
		rc = -ENOMEM;
		goto crypto_allocate_cmacaes_sdesc_fail;
	}
	server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
	server->secmech.sdesccmacaes->shash.flags = 0x0;

	return 0;

/* Unwind ladder: each label frees what was allocated before its goto. */
crypto_allocate_cmacaes_sdesc_fail:
	kfree(server->secmech.sdeschmacsha256);

crypto_allocate_hmacsha256_sdesc_fail:
	kfree(server->secmech.sdescmd5);

crypto_allocate_md5_sdesc_fail:
	kfree(server->secmech.sdeschmacmd5);

crypto_allocate_hmacmd5_sdesc_fail:
	crypto_free_shash(server->secmech.cmacaes);

crypto_allocate_cmacaes_fail:
	crypto_free_shash(server->secmech.hmacsha256);

crypto_allocate_hmacsha256_fail:
	crypto_free_shash(server->secmech.md5);

crypto_allocate_md5_fail:
	crypto_free_shash(server->secmech.hmacmd5);

	return rc;
}
/* Module unload: release the crc32c shash transform allocated at init. */
static void __exit libcrc32c_mod_fini(void)
{
	crypto_free_shash(tfm);
}
/*
 * Digest the relevant parts of the PKCS#7 data
 *
 * Computes the message digest for one signed-info block.  If authenticated
 * attributes are present, the digest of the content is first checked
 * against the messageDigest attribute, then the digest is recomputed over
 * the attributes themselves (re-tagged as a SET).  On success ownership of
 * the digest buffer moves to sinfo->sig.digest.
 */
static int pkcs7_digest(struct pkcs7_message *pkcs7,
			struct pkcs7_signed_info *sinfo)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	kenter(",%u,%u", sinfo->index, sinfo->sig.pkey_hash_algo);

	if (sinfo->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
	    !hash_algo_name[sinfo->sig.pkey_hash_algo])
		return -ENOPKG;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(hash_algo_name[sinfo->sig.pkey_hash_algo],
				 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	sinfo->sig.digest_size = digest_size = crypto_shash_digestsize(tfm);

	/* One allocation: digest buffer followed by an aligned descriptor. */
	ret = -ENOMEM;
	digest = kzalloc(ALIGN(digest_size, __alignof__(*desc)) + desc_size,
			 GFP_KERNEL);
	if (!digest)
		goto error_no_desc;

	desc = PTR_ALIGN(digest + digest_size, __alignof__(*desc));
	desc->tfm   = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* Digest the message [RFC2315 9.3] */
	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len, digest);
	if (ret < 0)
		goto error;
	pr_devel("MsgDigest = [%*ph]\n", 8, digest);

	/* However, if there are authenticated attributes, there must be a
	 * message digest attribute amongst them which corresponds to the
	 * digest we just calculated.
	 */
	if (sinfo->authattrs) {
		u8 tag;

		if (!sinfo->msgdigest) {
			pr_warn("Sig %u: No messageDigest\n", sinfo->index);
			ret = -EKEYREJECTED;
			goto error;
		}

		if (sinfo->msgdigest_len != sinfo->sig.digest_size) {
			pr_debug("Sig %u: Invalid digest size (%u)\n",
				 sinfo->index, sinfo->msgdigest_len);
			ret = -EBADMSG;
			goto error;
		}

		if (memcmp(digest, sinfo->msgdigest,
			   sinfo->msgdigest_len) != 0) {
			pr_debug("Sig %u: Message digest doesn't match\n",
				 sinfo->index);
			ret = -EKEYREJECTED;
			goto error;
		}

		/* We then calculate anew, using the authenticated attributes
		 * as the contents of the digest instead.  Note that we need to
		 * convert the attributes from a CONT.0 into a SET before we
		 * hash it.
		 */
		memset(digest, 0, sinfo->sig.digest_size);

		ret = crypto_shash_init(desc);
		if (ret < 0)
			goto error;
		tag = ASN1_CONS_BIT | ASN1_SET;
		ret = crypto_shash_update(desc, &tag, 1);
		if (ret < 0)
			goto error;
		ret = crypto_shash_finup(desc, sinfo->authattrs,
					 sinfo->authattrs_len, digest);
		if (ret < 0)
			goto error;
		pr_devel("AADigest = [%*ph]\n", 8, digest);
	}

	/* Hand the digest buffer over to the sinfo; NULL stops the kfree. */
	sinfo->sig.digest = digest;
	digest = NULL;

error:
	kfree(digest);
error_no_desc:
	crypto_free_shash(tfm);
	kleave(" = %d", ret);
	return ret;
}