static struct shash_desc *chcr_alloc_shash(unsigned int ds)
{
	/* Start as -EINVAL so an unrecognized digest size is caught by the
	 * IS_ERR() check below instead of dereferencing NULL.
	 */
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct shash_desc *desc;

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
		break;
	}
	if (IS_ERR(base_hash)) {
		pr_err("Can not allocate sha-generic algo.\n");
		return ERR_CAST(base_hash);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
		       GFP_KERNEL);
	if (!desc) {
		/* Don't leak the transform on allocation failure. */
		crypto_free_shash(base_hash);
		return ERR_PTR(-ENOMEM);
	}
	desc->tfm = base_hash;
	desc->flags = crypto_shash_get_flags(base_hash);
	return desc;
}
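/*
 * A minimal caller sketch (hypothetical, not part of the driver): the
 * descriptor returned above owns its tfm, so teardown must free both the
 * transform and the kmalloc'ed descriptor.
 */
static int chcr_alloc_shash_example(const u8 *data, unsigned int len, u8 *out)
{
	struct shash_desc *desc = chcr_alloc_shash(SHA256_DIGEST_SIZE);
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = crypto_shash_digest(desc, data, len, out); /* init+update+final */

	crypto_free_shash(desc->tfm);
	kfree(desc);
	return err;
}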
static int __init trusted_shash_alloc(void)
{
	int ret;

	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmacalg)) {
		pr_info("trusted_key: could not allocate crypto %s\n",
			hmac_alg);
		return PTR_ERR(hmacalg);
	}

	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hashalg)) {
		pr_info("trusted_key: could not allocate crypto %s\n",
			hash_alg);
		ret = PTR_ERR(hashalg);
		goto hashalg_fail;
	}

	return 0;

hashalg_fail:
	crypto_free_shash(hmacalg);
	return ret;
}
int cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	int rc;
	unsigned int size;

	/* crypto_alloc_shash() never returns NULL, so the redundant NULL
	 * checks (which would also have made PTR_ERR() return 0) are gone.
	 */
	server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
	if (IS_ERR(server->secmech.hmacmd5)) {
		cERROR(1, "could not allocate crypto hmacmd5\n");
		return PTR_ERR(server->secmech.hmacmd5);
	}

	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(server->secmech.md5)) {
		cERROR(1, "could not allocate crypto md5\n");
		rc = PTR_ERR(server->secmech.md5);
		goto crypto_allocate_md5_fail;
	}

	size = sizeof(struct shash_desc) +
	       crypto_shash_descsize(server->secmech.hmacmd5);
	server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdeschmacmd5) {
		cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
		rc = -ENOMEM;
		goto crypto_allocate_hmacmd5_sdesc_fail;
	}
	server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
	server->secmech.sdeschmacmd5->shash.flags = 0x0;

	size = sizeof(struct shash_desc) +
	       crypto_shash_descsize(server->secmech.md5);
	server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdescmd5) {
		cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
		rc = -ENOMEM;
		goto crypto_allocate_md5_sdesc_fail;
	}
	server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
	server->secmech.sdescmd5->shash.flags = 0x0;

	return 0;

crypto_allocate_md5_sdesc_fail:
	kfree(server->secmech.sdeschmacmd5);

crypto_allocate_hmacmd5_sdesc_fail:
	crypto_free_shash(server->secmech.md5);

crypto_allocate_md5_fail:
	crypto_free_shash(server->secmech.hmacmd5);

	return rc;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}
static int init_dedup_info(struct btrfs_fs_info *fs_info, u16 type,
			   u16 backend, u64 blocksize, u64 limit)
{
	struct btrfs_dedup_info *dedup_info;
	int ret;

	fs_info->dedup_info = kzalloc(sizeof(*dedup_info), GFP_NOFS);
	if (!fs_info->dedup_info)
		return -ENOMEM;

	dedup_info = fs_info->dedup_info;
	dedup_info->hash_type = type;
	dedup_info->backend = backend;
	dedup_info->blocksize = blocksize;
	dedup_info->limit_nr = limit;

	/* Only SHA256 is supported so far */
	dedup_info->dedup_driver = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(dedup_info->dedup_driver)) {
		btrfs_err(fs_info, "failed to init sha256 driver");
		ret = PTR_ERR(dedup_info->dedup_driver);
		kfree(fs_info->dedup_info);
		fs_info->dedup_info = NULL;
		return ret;
	}

	dedup_info->hash_root = RB_ROOT;
	dedup_info->bytenr_root = RB_ROOT;
	dedup_info->current_nr = 0;
	INIT_LIST_HEAD(&dedup_info->lru_list);
	mutex_init(&dedup_info->lock);

	return 0;
}
static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}
static int smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	int rc;
	unsigned int size;

	if (server->secmech.sdeschmacsha256 != NULL)
		return 0; /* already allocated */

	server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(server->secmech.hmacsha256)) {
		cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
		rc = PTR_ERR(server->secmech.hmacsha256);
		server->secmech.hmacsha256 = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) +
	       crypto_shash_descsize(server->secmech.hmacsha256);
	server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdeschmacsha256) {
		crypto_free_shash(server->secmech.hmacsha256);
		server->secmech.hmacsha256 = NULL;
		return -ENOMEM;
	}
	server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
	server->secmech.sdeschmacsha256->shash.flags = 0x0;

	return 0;
}
static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
{
	struct crypto_shash *tfm;
	struct kdf_sdesc *sdesc;
	int size;
	int err;

	/* allocate synchronous hash */
	tfm = crypto_alloc_shash(hashname, 0, 0);
	if (IS_ERR(tfm)) {
		pr_info("could not allocate digest TFM handle %s\n", hashname);
		return PTR_ERR(tfm);
	}

	err = -EINVAL;
	if (crypto_shash_digestsize(tfm) == 0)
		goto out_free_tfm;

	err = -ENOMEM;
	size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		goto out_free_tfm;
	sdesc->shash.tfm = tfm;
	sdesc->shash.flags = 0x0;

	*sdesc_ret = sdesc;

	return 0;

out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}
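/*
 * A sketch of the wrapper kdf_alloc() assumes (the real definition lives
 * with the caller): the flexible array puts the transform's per-request
 * state directly behind the descriptor, which is why a single kmalloc()
 * of sizeof(struct shash_desc) + crypto_shash_descsize(tfm) suffices.
 */
struct kdf_sdesc {
	struct shash_desc shash;
	char ctx[];
};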
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (cert->unsupported_crypto)
		return -ENOPKG;
	if (cert->sig.rsa.s)
		return 0;

	cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
	if (!cert->sig.rsa.s)
		return -ENOMEM;
	cert->sig.nr_mpi = 1;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			cert->unsupported_crypto = true;
			return -ENOPKG;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of the
	 * digest storage space.
	 */
	ret = -ENOMEM;
	digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
	if (!digest)
		goto error;

	cert->sig.digest = digest;
	cert->sig.digest_size = digest_size;

	desc = digest + digest_size;
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	might_sleep();
	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);

error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
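/*
 * Layout of the single kzalloc() in x509_get_sig_params() (a sketch added
 * for clarity, not part of the original source):
 *
 *   digest --> [ digest_size bytes of hash output ][ shash_desc | desc ctx ]
 *                                                  ^-- desc
 *
 * Freeing cert->sig.digest therefore releases the descriptor as well.
 */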
static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
{
	const char *alg;
	struct crypto_shash *fallback;
	struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);

	alg = crypto_tfm_alg_name(tfm);
	if (!alg) {
		printk(KERN_ERR "Failed to get algorithm name.\n");
		return -ENOENT;
	}

	fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback)) {
		printk(KERN_ERR
		       "Failed to allocate transformation for '%s': %ld\n",
		       alg, PTR_ERR(fallback));
		return PTR_ERR(fallback);
	}
	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
	       crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));

	/* Use the properly cast shash handle instead of a raw pointer cast. */
	crypto_shash_set_flags(fallback, crypto_shash_get_flags(shash_tfm));
	ctx->fallback = fallback;

	shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) +
			      crypto_shash_descsize(fallback);

	return 0;
}
static struct shash_desc *init_desc(void)
{
	int rc;
	struct shash_desc *desc;

	if (hmac_tfm == NULL) {
		hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(hmac_tfm)) {
			pr_err("Can not allocate %s (reason: %ld)\n",
			       evm_hmac, PTR_ERR(hmac_tfm));
			rc = PTR_ERR(hmac_tfm);
			hmac_tfm = NULL;
			return ERR_PTR(rc);
		}
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = hmac_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
	if (rc)
		goto out;
	rc = crypto_shash_init(desc);
out:
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
	struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

	/* init hash transform on demand */
	if (unlikely(!tfm)) {
		struct crypto_shash *prev_tfm;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm)) {
			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
					    PTR_ERR(tfm));
			return PTR_ERR(tfm);
		}
		/* If we lost the init race, free ours and use the winner's. */
		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
		if (prev_tfm) {
			crypto_free_shash(tfm);
			tfm = prev_tfm;
		}
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;

		return crypto_shash_digest(desc, key, keysize, salt);
	}
}
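/*
 * For reference, a rough sketch of what SHASH_DESC_ON_STACK(desc, tfm)
 * expands to (simplified; the exact definition varies by kernel version):
 *
 *   char __desc[sizeof(struct shash_desc) + crypto_shash_descsize(tfm)]
 *       CRYPTO_MINALIGN_ATTR;
 *   struct shash_desc *desc = (struct shash_desc *)__desc;
 *
 * The descriptor lives on the stack, so no kmalloc()/kfree() pair is
 * needed around the crypto_shash_digest() call above.
 */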
static int __init libcrc32c_mod_init(void)
{
	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	return 0;
}
int __init btrfs_hash_init(void)
{
	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	return 0;
}
/*
 * Digest the module contents.
 */
static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
						    const void *mod,
						    unsigned long modlen)
{
	struct public_key_signature *pks;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) :
			ERR_CAST(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of our
	 * context data and the digest output buffer on the end of that.
	 */
	ret = -ENOMEM;
	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
	if (!pks)
		goto error_no_pks;

	pks->pkey_hash_algo = hash;
	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
	pks->digest_size = digest_size;

	desc = (void *)pks + sizeof(*pks);
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = crypto_shash_finup(desc, mod, modlen, pks->digest);
	if (ret < 0)
		goto error;

	crypto_free_shash(tfm);
	pr_devel("<==%s() = ok\n", __func__);
	return pks;

error:
	kfree(pks);
error_no_pks:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ERR_PTR(ret);
}
static struct shash_desc *init_desc(char type)
{
	long rc;
	char *algo;
	struct crypto_shash **tfm;
	struct shash_desc *desc;

	if (type == EVM_XATTR_HMAC) {
		tfm = &hmac_tfm;
		algo = evm_hmac;
	} else {
		tfm = &hash_tfm;
		algo = evm_hash;
	}

	if (*tfm == NULL) {
		mutex_lock(&mutex);
		if (*tfm)
			goto out;
		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*tfm)) {
			rc = PTR_ERR(*tfm);
			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
			*tfm = NULL;
			mutex_unlock(&mutex);
			return ERR_PTR(rc);
		}
		if (type == EVM_XATTR_HMAC) {
			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
			if (rc) {
				crypto_free_shash(*tfm);
				*tfm = NULL;
				mutex_unlock(&mutex);
				return ERR_PTR(rc);
			}
		}
out:
		mutex_unlock(&mutex);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = *tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_init(desc);
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
int ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc);
		return rc;
	}
	return 0;
}
int orinoco_mic_init(struct orinoco_private *priv)
{
	int err;

	priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
					      CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tx_tfm_mic)) {
		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
		       "crypto API michael_mic\n");
		/* Propagate the real error instead of a blanket -ENOMEM. */
		err = PTR_ERR(priv->tx_tfm_mic);
		priv->tx_tfm_mic = NULL;
		return err;
	}

	priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
					      CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->rx_tfm_mic)) {
		printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
		       "crypto API michael_mic\n");
		err = PTR_ERR(priv->rx_tfm_mic);
		priv->rx_tfm_mic = NULL;
		/* Don't leak the tx transform allocated above. */
		crypto_free_shash(priv->tx_tfm_mic);
		priv->tx_tfm_mic = NULL;
		return err;
	}

	return 0;
}
static int cc_cipher_init(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct cc_crypto_alg *cc_alg =
		container_of(alg, struct cc_crypto_alg, crypto_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
	unsigned int max_key_buf_size = get_max_keysize(tfm);
	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
		crypto_tfm_alg_name(tfm));

	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);

	ctx_p->cipher_mode = cc_alg->cipher_mode;
	ctx_p->flow_mode = cc_alg->flow_mode;
	ctx_p->drvdata = cc_alg->drvdata;

	/* Allocate key buffer, cache line aligned */
	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
	if (!ctx_p->user.key)
		return -ENOMEM;

	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
		ctx_p->user.key);

	/* Map key buffer */
	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
						  max_key_buf_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
			max_key_buf_size, ctx_p->user.key);
		/* Don't leak the key buffer on mapping failure. */
		kfree(ctx_p->user.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Alloc hash tfm for essiv */
		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
		if (IS_ERR(ctx_p->shash_tfm)) {
			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
			/* Undo the mapping and allocation done above. */
			dma_unmap_single(dev, ctx_p->user.key_dma_addr,
					 max_key_buf_size, DMA_TO_DEVICE);
			kfree(ctx_p->user.key);
			return PTR_ERR(ctx_p->shash_tfm);
		}
	}

	return 0;
}
int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}
static int symlink_hash(unsigned int link_len, const char *link_str,
			u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d", __func__, rc);
		return rc;
	}
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure", __func__);
		goto symlink_hash_err;
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash", __func__);

symlink_hash_err:
	crypto_free_shash(md5);
	kfree(sdescmd5);

	return rc;
}
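/*
 * Several snippets here (symlink_hash, calc_hash, mdfour) rely on the
 * same small wrapper so that one allocation holds the descriptor plus
 * the transform's state. A sketch of the assumed definition (each
 * subsystem carries its own copy):
 */
struct sdesc {
	struct shash_desc shash;
	char ctx[];
};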
static int smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	unsigned int size;
	int rc;

	if (server->secmech.sdesccmacaes != NULL)
		return 0; /* already allocated */

	rc = smb2_crypto_shash_allocate(server);
	if (rc)
		return rc;

	server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(server->secmech.cmacaes)) {
		cifs_dbg(VFS, "could not allocate crypto cmac-aes\n");
		kfree(server->secmech.sdeschmacsha256);
		server->secmech.sdeschmacsha256 = NULL;
		crypto_free_shash(server->secmech.hmacsha256);
		server->secmech.hmacsha256 = NULL;
		rc = PTR_ERR(server->secmech.cmacaes);
		server->secmech.cmacaes = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) +
	       crypto_shash_descsize(server->secmech.cmacaes);
	server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdesccmacaes) {
		cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
		kfree(server->secmech.sdeschmacsha256);
		server->secmech.sdeschmacsha256 = NULL;
		crypto_free_shash(server->secmech.hmacsha256);
		crypto_free_shash(server->secmech.cmacaes);
		server->secmech.hmacsha256 = NULL;
		server->secmech.cmacaes = NULL;
		return -ENOMEM;
	}
	server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
	server->secmech.sdesccmacaes->shash.flags = 0x0;

	return 0;
}
static int __init init_profile_hash(void)
{
	struct crypto_shash *tfm;

	if (!apparmor_initialized)
		return 0;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm)) {
		int error = PTR_ERR(tfm);

		AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
		return error;
	}
	apparmor_tfm = tfm;
	apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);

	aa_info_message("AppArmor sha1 policy hashing enabled");

	return 0;
}
static int calc_hash(const u8 *src, int src_len, u8 *out, struct device *dev)
{
	struct crypto_shash *shash;
	struct sdesc *desc;
	int size;
	int ret = -EFAULT;

	shash = crypto_alloc_shash(HASH_ALG, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(shash)) {
		dev_err(dev, "%s: Error. crypto_alloc_shash.\n", __func__);
		/* Propagate the allocation error rather than -EFAULT. */
		ret = PTR_ERR(shash);
		goto err_shash;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
	desc = kmalloc(size, GFP_KERNEL);
	if (!desc) {
		dev_err(dev, "%s: Error. Not enough memory for desc.\n",
			__func__);
		ret = -ENOMEM;
		goto err_desc;
	}
	desc->shash.tfm = shash;
	desc->shash.flags = 0x00;

	if (crypto_shash_digest(&desc->shash, src, src_len, out)) {
		dev_err(dev, "%s: Error. generate hash.\n", __func__);
		goto err_generate;
	}
	ret = 0;

err_generate:
	kfree(desc);
err_desc:
	crypto_free_shash(shash);
err_shash:
	return ret;
}
int rfc6056_setup(void)
{
	int error;

	/* Secret key stuff */
	secret_key_len = (PAGE_SIZE < 128) ? PAGE_SIZE : 128;
	secret_key = __wkmalloc("Secret key", secret_key_len, GFP_KERNEL);
	if (!secret_key)
		return -ENOMEM;
	get_random_bytes(secret_key, secret_key_len);

	/* TFC stuff */
	shash = crypto_alloc_shash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(shash)) {
		error = PTR_ERR(shash);
		log_warn_once("Failed to load transform for MD5; errcode %d",
			      error);
		__wkfree("Secret key", secret_key);
		return error;
	}

	return 0;
}
/* produce a md4 message digest from data of length n bytes */
int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md4;
	struct sdesc *sdescmd4;

	md4 = crypto_alloc_shash("md4", 0, 0);
	if (IS_ERR(md4)) {
		/* Assign rc before logging it; it was used uninitialized. */
		rc = PTR_ERR(md4);
		cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
		return rc;
	}
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
	sdescmd4 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd4) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure\n", __func__);
		goto mdfour_err;
	}
	sdescmd4->shash.tfm = md4;
	sdescmd4->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd4->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md4 shash\n", __func__);
		goto mdfour_err;
	}
	/* Check the update result instead of ignoring it. */
	rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str\n", __func__);
		goto mdfour_err;
	}
	rc = crypto_shash_final(&sdescmd4->shash, md4_hash);

mdfour_err:
	crypto_free_shash(md4);
	kfree(sdescmd4);
	return rc;
}
static int __init libcrc32c_mod_init(void)
{
	tfm = crypto_alloc_shash("crc32c", 0, 0);
	return PTR_ERR_OR_ZERO(tfm);
}
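/*
 * A minimal sketch (not libcrc32c verbatim) of how the module-level tfm
 * is typically used afterwards: the crc32c shash keeps its running u32
 * state in the descriptor context, so the intermediate CRC can be read
 * back after an update without calling crypto_shash_final().
 */
static u32 crc32c_sketch(u32 crc, const void *address, unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	int err;

	shash->tfm = tfm;
	*ctx = crc;	/* seed the running CRC */

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	return *ctx;
}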