int cifs_crypto_shash_allocate(struct TCP_Server_Info *server) { int rc; unsigned int size; server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); if (!server->secmech.hmacmd5 || IS_ERR(server->secmech.hmacmd5)) { cERROR(1, "could not allocate crypto hmacmd5\n"); return PTR_ERR(server->secmech.hmacmd5); } server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); if (!server->secmech.md5 || IS_ERR(server->secmech.md5)) { cERROR(1, "could not allocate crypto md5\n"); rc = PTR_ERR(server->secmech.md5); goto crypto_allocate_md5_fail; } size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.hmacmd5); server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdeschmacmd5) { cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n"); rc = -ENOMEM; goto crypto_allocate_hmacmd5_sdesc_fail; } server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5; server->secmech.sdeschmacmd5->shash.flags = 0x0; size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.md5); server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdescmd5) { cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n"); rc = -ENOMEM; goto crypto_allocate_md5_sdesc_fail; } server->secmech.sdescmd5->shash.tfm = server->secmech.md5; server->secmech.sdescmd5->shash.flags = 0x0; return 0; crypto_allocate_md5_sdesc_fail: kfree(server->secmech.sdeschmacmd5); crypto_allocate_hmacmd5_sdesc_fail: crypto_free_shash(server->secmech.md5); crypto_allocate_md5_fail: crypto_free_shash(server->secmech.hmacmd5); return rc; }
static int symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) { int rc; unsigned int size; struct crypto_shash *md5; struct sdesc *sdescmd5; md5 = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(md5)) { rc = PTR_ERR(md5); cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); sdescmd5 = kmalloc(size, GFP_KERNEL); if (!sdescmd5) { rc = -ENOMEM; cERROR(1, "%s: Memory allocation failure\n", __func__); goto symlink_hash_err; } sdescmd5->shash.tfm = md5; sdescmd5->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd5->shash); if (rc) { cERROR(1, "%s: Could not init md5 shash\n", __func__); goto symlink_hash_err; } <<<<<<< HEAD
/*
 * Allocate the HMAC-SHA256 transform and shash descriptor used for SMB2
 * signing on this server connection.  Idempotent: returns 0 immediately
 * when the descriptor already exists, so callers may invoke it lazily.
 */
static int
smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
{
	int rc;
	unsigned int size;

	if (server->secmech.sdeschmacsha256 != NULL)
		return 0; /* already allocated */

	server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(server->secmech.hmacsha256)) {
		cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
		rc = PTR_ERR(server->secmech.hmacsha256);
		/* reset to NULL so a later retry starts from a clean state */
		server->secmech.hmacsha256 = NULL;
		return rc;
	}

	/* descriptor carries the transform's per-request state inline */
	size = sizeof(struct shash_desc) +
			crypto_shash_descsize(server->secmech.hmacsha256);
	server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
	if (!server->secmech.sdeschmacsha256) {
		crypto_free_shash(server->secmech.hmacsha256);
		server->secmech.hmacsha256 = NULL;
		return -ENOMEM;
	}
	server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
	server->secmech.sdeschmacsha256->shash.flags = 0x0;

	return 0;
}
static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname) { struct crypto_shash *tfm; struct kdf_sdesc *sdesc; int size; int err; /* allocate synchronous hash */ tfm = crypto_alloc_shash(hashname, 0, 0); if (IS_ERR(tfm)) { pr_info("could not allocate digest TFM handle %s\n", hashname); return PTR_ERR(tfm); } err = -EINVAL; if (crypto_shash_digestsize(tfm) == 0) goto out_free_tfm; err = -ENOMEM; size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) goto out_free_tfm; sdesc->shash.tfm = tfm; sdesc->shash.flags = 0x0; *sdesc_ret = sdesc; return 0; out_free_tfm: crypto_free_shash(tfm); return err; }
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 *
 * The digest buffer and the shash operational data are allocated as one
 * kzalloc'd region (digest first, desc after it); the digest is kept in
 * cert->sig.digest for the caller, the desc is discarded with the tfm.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (cert->unsupported_crypto)
		return -ENOPKG;
	if (cert->sig.rsa.s)
		return 0;	/* already extracted */

	cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size);
	if (!cert->sig.rsa.s)
		return -ENOMEM;
	cert->sig.nr_mpi = 1;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			/* remember so later callers fail fast with -ENOPKG */
			cert->unsupported_crypto = true;
			return -ENOPKG;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of the
	 * digest storage space.
	 */
	ret = -ENOMEM;
	digest = kzalloc(digest_size + desc_size, GFP_KERNEL);
	if (!digest)
		goto error;

	cert->sig.digest = digest;
	cert->sig.digest_size = digest_size;

	desc = digest + digest_size;
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	might_sleep();
	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
	/* success falls through: the tfm is freed on both paths */
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
static struct shash_desc *chcr_alloc_shash(unsigned int ds) { struct crypto_shash *base_hash = NULL; struct shash_desc *desc; switch (ds) { case SHA1_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha1-generic", 0, 0); break; case SHA224_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha224-generic", 0, 0); break; case SHA256_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha256-generic", 0, 0); break; case SHA384_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha384-generic", 0, 0); break; case SHA512_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha512-generic", 0, 0); break; } if (IS_ERR(base_hash)) { pr_err("Can not allocate sha-generic algo.\n"); return (void *)base_hash; } desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash), GFP_KERNEL); if (!desc) return ERR_PTR(-ENOMEM); desc->tfm = base_hash; desc->flags = crypto_shash_get_flags(base_hash); return desc; }
/*
 * Build an HMAC shash descriptor keyed with the EVM key.
 *
 * The hmac_tfm transform is allocated once and cached in a file-level
 * global for reuse across calls; only the descriptor is per-call.
 * Returns the descriptor (caller frees) or an ERR_PTR().
 *
 * NOTE(review): the unsynchronised check-then-set of the hmac_tfm global
 * looks racy if two threads enter concurrently - confirm callers
 * serialise access to this path.
 */
static struct shash_desc *init_desc(void)
{
	int rc;
	struct shash_desc *desc;

	if (hmac_tfm == NULL) {
		hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(hmac_tfm)) {
			pr_err("Can not allocate %s (reason: %ld)\n",
			       evm_hmac, PTR_ERR(hmac_tfm));
			rc = PTR_ERR(hmac_tfm);
			/* leave the global NULL so a later call can retry */
			hmac_tfm = NULL;
			return ERR_PTR(rc);
		}
	}

	/* descriptor carries the transform's per-request state inline */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = hmac_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
	if (rc)
		goto out;
	rc = crypto_shash_init(desc);
out:
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
static int p8_ghash_init_tfm(struct crypto_tfm *tfm) { const char *alg; struct crypto_shash *fallback; struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); if (!(alg = crypto_tfm_alg_name(tfm))) { printk(KERN_ERR "Failed to get algorithm name.\n"); return -ENOENT; } fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } printk(KERN_INFO "Using '%s' as fallback implementation.\n", crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash *) tfm)); ctx->fallback = fallback; shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) + crypto_shash_descsize(fallback); return 0; }
/*
 * Digest the module contents.
 *
 * Allocates one kzalloc'd region laid out as [pks][desc][digest]: the
 * shash operational data sits right after the public_key_signature and
 * the digest output buffer after that, so a single kfree() releases all.
 * Returns the filled-in signature struct or an ERR_PTR().
 */
static struct public_key_signature *mod_make_digest(enum pkey_hash_algo hash,
						    const void *mod,
						    unsigned long modlen)
{
	struct public_key_signature *pks;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(pkey_hash_algo[hash], 0, 0);
	if (IS_ERR(tfm))
		/* missing algorithm is reported as "package not available" */
		return (PTR_ERR(tfm) == -ENOENT) ? ERR_PTR(-ENOPKG) : ERR_CAST(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of our
	 * context data and the digest output buffer on the end of that.
	 */
	ret = -ENOMEM;
	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
	if (!pks)
		goto error_no_pks;

	pks->pkey_hash_algo = hash;
	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
	pks->digest_size = digest_size;

	desc = (void *)pks + sizeof(*pks);
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = crypto_shash_finup(desc, mod, modlen, pks->digest);
	if (ret < 0)
		goto error;

	crypto_free_shash(tfm);
	pr_devel("<==%s() = ok\n", __func__);
	return pks;

error:
	kfree(pks);
error_no_pks:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ERR_PTR(ret);
}
/*
 * Build a shash descriptor for either the EVM HMAC (keyed) or the EVM
 * hash transform, selected by @type.
 *
 * The transform itself is allocated once per algorithm and cached in a
 * file-level global; the double-checked pattern under the mutex makes
 * the lazy initialisation safe against concurrent callers.
 * Returns the descriptor (caller frees) or an ERR_PTR().
 */
static struct shash_desc *init_desc(char type)
{
	long rc;
	char *algo;
	struct crypto_shash **tfm;
	struct shash_desc *desc;

	if (type == EVM_XATTR_HMAC) {
		tfm = &hmac_tfm;
		algo = evm_hmac;
	} else {
		tfm = &hash_tfm;
		algo = evm_hash;
	}

	if (*tfm == NULL) {
		mutex_lock(&mutex);
		/* re-check under the lock: someone may have beaten us here */
		if (*tfm)
			goto out;
		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(*tfm)) {
			rc = PTR_ERR(*tfm);
			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
			*tfm = NULL;
			mutex_unlock(&mutex);
			return ERR_PTR(rc);
		}
		if (type == EVM_XATTR_HMAC) {
			/* HMAC transform must be keyed before first use */
			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
			if (rc) {
				crypto_free_shash(*tfm);
				*tfm = NULL;
				mutex_unlock(&mutex);
				return ERR_PTR(rc);
			}
		}
out:
		mutex_unlock(&mutex);
	}

	/* descriptor carries the transform's per-request state inline */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->tfm = *tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	rc = crypto_shash_init(desc);
	if (rc) {
		kfree(desc);
		return ERR_PTR(rc);
	}
	return desc;
}
static struct sdesc *init_sdesc(struct crypto_shash *alg) { struct sdesc *sdesc; int size; size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) return ERR_PTR(-ENOMEM); sdesc->shash.tfm = alg; sdesc->shash.flags = 0x0; return sdesc; }
/*
 * Compute the MD5 digest of a symlink target string.
 * @link_len: length of @link_str in bytes
 * @link_str: the symlink target to hash
 * @md5_hash: output buffer, must hold an MD5 digest (16 bytes)
 *
 * Returns 0 on success or a negative errno.  The transform and the
 * descriptor are always released before returning.
 */
static int
symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
{
	int rc;
	unsigned int size;
	struct crypto_shash *md5;
	struct sdesc *sdescmd5;

	md5 = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(md5)) {
		rc = PTR_ERR(md5);
		cERROR(1, "%s: Crypto md5 allocation error %d", __func__, rc);
		return rc;
	}
	/* descriptor carries the transform's per-request state inline */
	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
	sdescmd5 = kmalloc(size, GFP_KERNEL);
	if (!sdescmd5) {
		rc = -ENOMEM;
		cERROR(1, "%s: Memory allocation failure", __func__);
		goto symlink_hash_err;
	}
	sdescmd5->shash.tfm = md5;
	sdescmd5->shash.flags = 0x0;

	rc = crypto_shash_init(&sdescmd5->shash);
	if (rc) {
		cERROR(1, "%s: Could not init md5 shash", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
	if (rc) {
		cERROR(1, "%s: Could not update with link_str", __func__);
		goto symlink_hash_err;
	}
	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
	if (rc)
		cERROR(1, "%s: Could not generate md5 hash", __func__);

symlink_hash_err:
	/* kfree(NULL) is a no-op, so this is safe on the alloc-failure path */
	crypto_free_shash(md5);
	kfree(sdescmd5);

	return rc;
}
static int smb3_crypto_shash_allocate(struct TCP_Server_Info *server) { unsigned int size; int rc; if (server->secmech.sdesccmacaes != NULL) return 0; /* already allocated */ rc = smb2_crypto_shash_allocate(server); if (rc) return rc; server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(server->secmech.cmacaes)) { cifs_dbg(VFS, "could not allocate crypto cmac-aes"); kfree(server->secmech.sdeschmacsha256); server->secmech.sdeschmacsha256 = NULL; crypto_free_shash(server->secmech.hmacsha256); server->secmech.hmacsha256 = NULL; rc = PTR_ERR(server->secmech.cmacaes); server->secmech.cmacaes = NULL; return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.cmacaes); server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdesccmacaes) { cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__); kfree(server->secmech.sdeschmacsha256); server->secmech.sdeschmacsha256 = NULL; crypto_free_shash(server->secmech.hmacsha256); crypto_free_shash(server->secmech.cmacaes); server->secmech.hmacsha256 = NULL; server->secmech.cmacaes = NULL; return -ENOMEM; } server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes; server->secmech.sdesccmacaes->shash.flags = 0x0; return 0; }
/**
 * RFC 6056, Algorithm 3.
 *
 * Just to clarify: Because our port pool is a somewhat complex data structure
 * (rather than a simple range), ephemerals are now handled by pool4. This
 * function has been stripped now to only consist of F(). (Hence the name.)
 *
 * Hashes the relevant fields of @tuple6 with the module-level "shash"
 * transform and stores the last 32 bits of the digest in *@result.
 * Returns 0 on success or a negative errno from the crypto layer.
 * Uses GFP_ATOMIC - safe to call from non-sleeping context.
 */
int rfc6056_f(const struct tuple *tuple6, __u8 fields, unsigned int *result)
{
	/* lets us read the 16-byte digest as four 32-bit words */
	union {
		__be32 as32[4];
		__u8 as8[16];
	} md5_result;
	struct shash_desc *desc;
	int error = 0;

	/* descriptor carries the transform's per-request state inline */
	desc = __wkmalloc("shash desc", sizeof(struct shash_desc) +
			crypto_shash_descsize(shash), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->tfm = shash;
	desc->flags = 0;

	error = crypto_shash_init(desc);
	if (error) {
		log_debug("crypto_hash_init() failed. Errcode: %d", error);
		goto end;
	}
	error = hash_tuple(desc, fields, tuple6);
	if (error) {
		log_debug("crypto_hash_update() failed. Errcode: %d", error);
		goto end;
	}
	error = crypto_shash_final(desc, md5_result.as8);
	if (error) {
		log_debug("crypto_hash_digest() failed. Errcode: %d", error);
		goto end;
	}

	*result = (__force __u32)md5_result.as32[3];
	/* Fall through. */

end:
	__wkfree("shash desc", desc);
	return error;
}
/*
 * Digest @src_len bytes of @src with HASH_ALG and write the result to @out.
 *
 * Returns 0 on success or a negative errno.  The original collapsed every
 * failure to -EFAULT (and its initial value even masked the real error
 * from crypto_shash_digest()); real error codes are now propagated.
 */
static int calc_hash(const u8 *src, int src_len, u8 *out, struct device *dev)
{
	struct crypto_shash *shash;
	struct sdesc *desc;
	int size;
	int ret;

	shash = crypto_alloc_shash(HASH_ALG, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(shash)) {
		dev_err(dev, "%s: Error. crypto_alloc_shash.\n", __func__);
		ret = PTR_ERR(shash);
		goto err_shash;
	}

	/* descriptor carries the transform's per-request state inline */
	size = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
	desc = kmalloc(size, GFP_KERNEL);
	if (!desc) {
		dev_err(dev, "%s: Error. No enough mem for Desc.\n", __func__);
		ret = -ENOMEM;
		goto err_desc;
	}

	desc->shash.tfm = shash;
	desc->shash.flags = 0x00;

	ret = crypto_shash_digest(&desc->shash, src, src_len, out);
	if (ret)
		dev_err(dev, "%s: Error. generate hash.\n", __func__);

	kfree(desc);
err_desc:
	crypto_free_shash(shash);
err_shash:
	return ret;
}
/* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); return PTR_ERR(md4); } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; cERROR(1, "%s: Memory allocation failure\n", __func__); goto mdfour_err; } sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cERROR(1, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } crypto_shash_update(&sdescmd4->shash, link_str, link_len); rc = crypto_shash_final(&sdescmd4->shash, md4_hash); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; }
/*
 * Check the signature on a certificate using the provided public key.
 *
 * One kzalloc'd region is laid out as [sig][desc][digest]: the shash
 * operational data sits after the public_key_signature and the digest
 * buffer after that, so a single kfree() releases everything.
 * Returns 0 when the signature verifies, or a negative errno.
 */
static int x509_check_signature(const struct public_key *pub,
				const struct x509_certificate *cert)
{
	struct public_key_signature *sig;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0);
	if (IS_ERR(tfm))
		/* missing algorithm is reported as "package not available" */
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	/* We allocate the hash operational data storage on the end of our
	 * context data.
	 */
	ret = -ENOMEM;
	sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL);
	if (!sig)
		goto error_no_sig;

	sig->pkey_hash_algo = cert->sig_hash_algo;
	sig->digest = (u8 *)sig + sizeof(*sig) + desc_size;
	sig->digest_size = digest_size;

	desc = (void *)sig + sizeof(*sig);
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size);
	if (!sig->rsa.s)
		goto error;

	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
	if (ret < 0)
		goto error_mpi;

	ret = pub->algo->verify_signature(pub, sig);

	pr_debug("Cert Verification: %d\n", ret);

error_mpi:
	mpi_free(sig->rsa.s);
error:
	kfree(sig);
error_no_sig:
	crypto_free_shash(tfm);

	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/*
 * Verify a module's signature.
 *
 * Locates the signature note section, digests the canonicalised set of
 * relevant ELF sections with SHA1, and checks the result against the
 * embedded signature via ksign_verify_signature().  On a good signature
 * *_gpgsig_ok is set to 1.  Returns 0, or a negative errno (-ELIBBAD for
 * format errors, -EKEYREJECTED for bad/unsigned-when-required modules).
 */
int module_verify_signature(struct module_verify_data *mvdata,
			    int *_gpgsig_ok)
{
	const struct elf_note *note;
	struct crypto_shash *tfm;
	const Elf_Shdr *sechdrs = mvdata->sections;
	const char *secstrings = mvdata->secstrings;
	const char *sig;
	unsigned note_size, sig_size, note_namesz;
	int loop, ret;

	_debug("looking for sig section '%s'\n", modsign_note_section);

	/* find the signature note section by name */
	for (loop = 1; loop < mvdata->nsects; loop++) {
		switch (sechdrs[loop].sh_type) {
		case SHT_NOTE:
			if (strcmp(mvdata->secstrings + sechdrs[loop].sh_name,
				   modsign_note_section) == 0)
				mvdata->sig_index = loop;
			break;
		}
	}

	if (mvdata->sig_index <= 0)
		goto no_signature;

	note = mvdata->buffer + sechdrs[mvdata->sig_index].sh_offset;
	note_size = sechdrs[mvdata->sig_index].sh_size;

	/* there should be one note of the appropriate type */
	if (note_size < sizeof(*note) + 2 * 4)
		goto format_error_no_free;
	note_namesz = note->n_namesz;
	sig_size = note->n_descsz;
	if (note_namesz != sizeof(modsign_note_name))
		goto format_error_no_free;
	if (note->n_type != MODSIGN_NOTE_TYPE)
		goto format_error_no_free;
	if (memcmp(note + 1, modsign_note_name, note_namesz) != 0)
		goto format_error_no_free;
	/* signature payload follows the 4-byte-aligned note name */
	sig = (void *)(note + 1) + roundup(note_namesz, 4);
	_debug("sig in section %d (size %d)\n",
	       mvdata->sig_index, sig_size);
	_debug("%02x%02x%02x%02x%02x%02x%02x%02x\n",
	       sig[0], sig[1], sig[2], sig[3],
	       sig[4], sig[5], sig[6], sig[7]);

	/* produce a canonicalisation map for the sections */
	ret = module_verify_canonicalise(mvdata);
	if (ret < 0)
		return ret;

	/* grab an SHA1 transformation context
	 * - !!! if this tries to load the sha1.ko module, we will deadlock!!!
	 */
	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "Couldn't load module - SHA1 transform unavailable\n");
		return -EPERM;
	}

	mvdata->hash = kmalloc(sizeof(*mvdata->hash) +
			       crypto_shash_descsize(tfm),
			       GFP_KERNEL);
	if (!mvdata->hash) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	mvdata->hash->tfm = tfm;
	mvdata->hash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_init(mvdata->hash);
	if (ret < 0) {
		crypto_free_shash(mvdata->hash->tfm);
		kfree(mvdata->hash);
		/* NOTE(review): this discards crypto_shash_init()'s error and
		 * reports -ENOMEM instead - probably should return ret */
		return -ENOMEM;
	}

#ifdef MODSIGN_DEBUG
	mvdata->xcsum = 0;
#endif

	/* load data from each relevant section into the digest */
	for (loop = 0; loop < mvdata->ncanon; loop++) {
		int sect = mvdata->canonlist[loop];
		unsigned long sh_type = sechdrs[sect].sh_type;
		unsigned long sh_info = sechdrs[sect].sh_info;
		unsigned long sh_size = sechdrs[sect].sh_size;
		unsigned long sh_flags = sechdrs[sect].sh_flags;
		const char *sh_name = secstrings + sechdrs[sect].sh_name;
		const void *data = mvdata->buffer + sechdrs[sect].sh_offset;

#ifdef MODSIGN_DEBUG
		mvdata->csum = 0;
#endif

		/* it would be nice to include relocation sections, but the act
		 * of adding a signature to the module seems changes their
		 * contents, because the symtab gets changed when sections are
		 * added or removed */
		if (sh_type == SHT_REL || sh_type == SHT_RELA) {
			uint32_t xsh_info = mvdata->canonmap[sh_info];

			crypto_digest_update_data(mvdata, sh_name, strlen(sh_name));
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_type);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_flags);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_size);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_addralign);
			crypto_digest_update_val(mvdata, xsh_info);

			if (sh_type == SHT_RELA)
				ret = extract_elf_rela(
					mvdata, sect, data,
					sh_size / sizeof(Elf_Rela),
					sh_name);
			else
				ret = extract_elf_rel(
					mvdata, sect, data,
					sh_size / sizeof(Elf_Rel),
					sh_name);
			if (ret < 0)
				goto format_error;
			continue;
		}

		/* include the headers of BSS sections */
		if (sh_type == SHT_NOBITS && sh_flags & SHF_ALLOC) {
			crypto_digest_update_data(mvdata, sh_name, strlen(sh_name));
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_type);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_flags);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_size);
			crypto_digest_update_val(mvdata, sechdrs[sect].sh_addralign);
			goto digested;
		}

		/* include allocatable loadable sections */
		if (sh_type != SHT_NOBITS && sh_flags & SHF_ALLOC)
			goto include_section;

		continue;

	include_section:
		crypto_digest_update_data(mvdata, sh_name, strlen(sh_name));
		crypto_digest_update_val(mvdata, sechdrs[sect].sh_type);
		crypto_digest_update_val(mvdata, sechdrs[sect].sh_flags);
		crypto_digest_update_val(mvdata, sechdrs[sect].sh_size);
		crypto_digest_update_val(mvdata, sechdrs[sect].sh_addralign);
		crypto_digest_update_data(mvdata, data, sh_size);

	digested:
		_debug("%08zx %02x digested the %s section, size %ld\n",
		       mvdata->signed_size, mvdata->csum, sh_name, sh_size);
	}

	_debug("Contributed %zu bytes to the digest (csum 0x%02x)\n",
	       mvdata->signed_size, mvdata->xcsum);

	/* do the actual signature verification */
	ret = ksign_verify_signature(sig, sig_size, mvdata->hash);

	crypto_free_shash(mvdata->hash->tfm);
	kfree(mvdata->hash);

	_debug("verify-sig : %d\n", ret);

	switch (ret) {
	case 0:			/* good signature */
		*_gpgsig_ok = 1;
		break;
	case -EKEYREJECTED:	/* signature mismatch or number format error */
		printk(KERN_ERR "Module signature verification failed\n");
		break;
	case -ENOKEY:		/* signed, but we don't have the public key */
		printk(KERN_ERR "Module signed with unknown public key\n");
		break;
	default:		/* other error (probably ENOMEM) */
		break;
	}

	if (ret && badsigok) {
		printk(KERN_ERR "Bad signature ignored by cmdline\n");
		ret = 0;
	}

	return ret;

format_error:
	crypto_free_shash(mvdata->hash->tfm);
	kfree(mvdata->hash);
format_error_no_free:
	printk(KERN_ERR "Module format error encountered\n");
	return -ELIBBAD;

	/* deal with the case of an unsigned module */
no_signature:
	_debug("no signature found\n");
	if (!signedonly)
		return 0;
	printk(KERN_ERR "An attempt to load unsigned module was rejected\n");
	return -EKEYREJECTED;
}
/*
 * Set up the signature parameters in an X.509 certificate.  This involves
 * digesting the signed data and extracting the signature.
 *
 * Unsupported key/signature algorithms are recorded on the cert (and 0
 * returned) rather than treated as hard errors, so the caller can decide
 * how strict to be.  The computed TBS digest is also checked against the
 * system blacklist.
 */
int x509_get_sig_params(struct x509_certificate *cert)
{
	struct public_key_signature *sig = cert->sig;
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t desc_size;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (!cert->pub->pkey_algo)
		cert->unsupported_key = true;

	if (!sig->pkey_algo)
		cert->unsupported_sig = true;

	/* We check the hash if we can - even if we can't then verify it */
	if (!sig->hash_algo) {
		cert->unsupported_sig = true;
		return 0;
	}

	sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
	if (!sig->s)
		return -ENOMEM;

	sig->s_size = cert->raw_sig_size;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
	if (IS_ERR(tfm)) {
		if (PTR_ERR(tfm) == -ENOENT) {
			/* hash not built in: note it but do not fail */
			cert->unsupported_sig = true;
			return 0;
		}
		return PTR_ERR(tfm);
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	sig->digest_size = crypto_shash_digestsize(tfm);

	ret = -ENOMEM;
	sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
	if (!sig->digest)
		goto error;

	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		goto error;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
	if (ret < 0)
		goto error_2;

	ret = is_hash_blacklisted(sig->digest, sig->digest_size, "tbs");
	if (ret == -EKEYREJECTED) {
		pr_err("Cert %*phN is blacklisted\n",
		       sig->digest_size, sig->digest);
		/* mark the cert but report success to the caller */
		cert->blacklisted = true;
		ret = 0;
	}

error_2:
	kfree(desc);
error:
	crypto_free_shash(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
/*
 * Perform the target side of MD5 CHAP authentication: verify the
 * initiator's CHAP_R response to our challenge and, when mutual
 * authentication is enabled, answer the initiator's own challenge by
 * emitting CHAP_N/CHAP_R into @nr_out_ptr.
 *
 * Returns 0 when authentication succeeds, -1 on any failure (the
 * protocol layer only distinguishes pass/fail here).
 */
static int chap_server_compute_md5(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	unsigned long id;
	unsigned char id_as_uchar;
	unsigned char digest[MD5_SIGNATURE_SIZE];
	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
	unsigned char identifier[10], *challenge = NULL;
	unsigned char *challenge_binhex = NULL;
	unsigned char client_digest[MD5_SIGNATURE_SIZE];
	unsigned char server_digest[MD5_SIGNATURE_SIZE];
	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
	size_t compare_len;
	struct iscsi_chap *chap = conn->auth_protocol;
	struct crypto_shash *tfm = NULL;
	struct shash_desc *desc = NULL;
	int auth_ret = -1, ret, challenge_len;

	memset(identifier, 0, 10);
	memset(chap_n, 0, MAX_CHAP_N_SIZE);
	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
	memset(digest, 0, MD5_SIGNATURE_SIZE);
	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
	memset(server_digest, 0, MD5_SIGNATURE_SIZE);

	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge) {
		pr_err("Unable to allocate challenge buffer\n");
		goto out;
	}

	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge_binhex) {
		pr_err("Unable to allocate challenge_binhex buffer\n");
		goto out;
	}
	/*
	 * Extract CHAP_N.
	 */
	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
				&type) < 0) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	if (type == HEX) {
		/* CHAP_N must be a plain string, not hex-encoded */
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}

	/* Include the terminating NULL in the compare */
	compare_len = strlen(auth->userid) + 1;
	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
		pr_err("CHAP_N values do not match!\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
	/*
	 * Extract CHAP_R.
	 */
	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
				&type) < 0) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	if (type != HEX) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}

	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm)) {
		/* NULL it so the cleanup path does not free an ERR_PTR */
		tfm = NULL;
		pr_err("Unable to allocate struct crypto_shash\n");
		goto out;
	}

	/* descriptor carries the transform's per-request state inline */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		pr_err("Unable to allocate struct shash_desc\n");
		goto out;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	/* expected digest = MD5(id || secret || our challenge) */
	ret = crypto_shash_init(desc);
	if (ret < 0) {
		pr_err("crypto_shash_init() failed\n");
		goto out;
	}

	ret = crypto_shash_update(desc, &chap->id, 1);
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for id\n");
		goto out;
	}

	ret = crypto_shash_update(desc, (char *)&auth->password,
				  strlen(auth->password));
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for password\n");
		goto out;
	}

	ret = crypto_shash_finup(desc, chap->challenge,
				 CHAP_CHALLENGE_LENGTH, server_digest);
	if (ret < 0) {
		pr_err("crypto_shash_finup() failed for challenge\n");
		goto out;
	}

	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
	pr_debug("[server] MD5 Server Digest: %s\n", response);

	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
		pr_debug("[server] MD5 Digests do not match!\n\n");
		goto out;
	} else
		pr_debug("[server] MD5 Digests match, CHAP connection"
				" successful.\n\n");
	/*
	 * One way authentication has succeeded, return now if mutual
	 * authentication is not enabled.
	 */
	if (!auth->authenticate_target) {
		auth_ret = 0;
		goto out;
	}
	/*
	 * Get CHAP_I.
	 */
	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
		pr_err("Could not find CHAP_I.\n");
		goto out;
	}

	if (type == HEX)
		ret = kstrtoul(&identifier[2], 0, &id);
	else
		ret = kstrtoul(identifier, 0, &id);

	if (ret < 0) {
		pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
		goto out;
	}
	if (id > 255) {
		pr_err("chap identifier: %lu greater than 255\n", id);
		goto out;
	}
	/*
	 * RFC 1994 says Identifier is no more than octet (8 bits).
	 */
	pr_debug("[server] Got CHAP_I=%lu\n", id);
	/*
	 * Get CHAP_C.
	 */
	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
			challenge, &type) < 0) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}

	if (type != HEX) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_C=%s\n", challenge);
	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
				strlen(challenge));
	if (!challenge_len) {
		pr_err("Unable to convert incoming challenge\n");
		goto out;
	}
	if (challenge_len > 1024) {
		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
		goto out;
	}
	/*
	 * During mutual authentication, the CHAP_C generated by the
	 * initiator must not match the original CHAP_C generated by
	 * the target.
	 */
	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
		pr_err("initiator CHAP_C matches target CHAP_C, failing"
				" login attempt\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R for mutual authentication.
	 */
	ret = crypto_shash_init(desc);
	if (ret < 0) {
		pr_err("crypto_shash_init() failed\n");
		goto out;
	}

	/* To handle both endiannesses */
	id_as_uchar = id;
	ret = crypto_shash_update(desc, &id_as_uchar, 1);
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for id\n");
		goto out;
	}

	ret = crypto_shash_update(desc, auth->password_mutual,
				  strlen(auth->password_mutual));
	if (ret < 0) {
		pr_err("crypto_shash_update() failed for"
				" password_mutual\n");
		goto out;
	}
	/*
	 * Convert received challenge to binary hex.
	 */
	ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
				 digest);
	if (ret < 0) {
		pr_err("crypto_shash_finup() failed for ma challenge\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R.
	 */
	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
	/*
	 * Convert response from binary hex to ascii hext.
	 */
	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
			response);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
	auth_ret = 0;
out:
	/* kzfree scrubs the keyed descriptor state before freeing */
	kzfree(desc);
	if (tfm)
		crypto_free_shash(tfm);
	kfree(challenge);
	kfree(challenge_binhex);
	return auth_ret;
}
/*
 * Digest the contents of the PE binary, leaving out the image checksum and the
 * certificate data block.
 *
 * The computed digest is compared against ctx->digest (from the MSCODE
 * part of the PKCS#7 certificate); mismatch yields -EKEYREJECTED.  The
 * desc and its output digest share one kzalloc'd region ([desc][digest]).
 */
static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
			    struct pefile_context *ctx)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	kenter(",%s", ctx->digest_algo);

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be.
	 */
	tfm = crypto_alloc_shash(ctx->digest_algo, 0, 0);
	if (IS_ERR(tfm))
		/* missing algorithm is reported as "package not available" */
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	digest_size = crypto_shash_digestsize(tfm);

	if (digest_size != ctx->digest_len) {
		pr_debug("Digest size mismatch (%zx != %x)\n",
			 digest_size, ctx->digest_len);
		ret = -EBADMSG;
		goto error_no_desc;
	}
	pr_debug("Digest: desc=%zu size=%zu\n", desc_size, digest_size);

	ret = -ENOMEM;
	desc = kzalloc(desc_size + digest_size, GFP_KERNEL);
	if (!desc)
		goto error_no_desc;

	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;

	ret = pefile_digest_pe_contents(pebuf, pelen, ctx, desc);
	if (ret < 0)
		goto error;

	/* digest output lives directly after the operational data */
	digest = (void *)desc + desc_size;
	ret = crypto_shash_final(desc, digest);
	if (ret < 0)
		goto error;
	pr_debug("Digest calc = [%*ph]\n", ctx->digest_len, digest);

	/* Check that the PE file digest matches that in the MSCODE part of the
	 * PKCS#7 certificate.
	 */
	if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
		pr_debug("Digest mismatch\n");
		ret = -EKEYREJECTED;
	} else {
		pr_debug("The digests match!\n");
	}

error:
	kfree(desc);
error_no_desc:
	crypto_free_shash(tfm);
	kleave(" = %d", ret);
	return ret;
}
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	/* recompute HMAC-SHA256(key, challenge) locally for comparison */
	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	/* descriptor carries the transform's per-request state inline */
	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
int cifs_crypto_shash_allocate(struct TCP_Server_Info *server) { int rc; unsigned int size; server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); if (IS_ERR(server->secmech.hmacmd5)) { cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); return PTR_ERR(server->secmech.hmacmd5); } server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(server->secmech.md5)) { cifs_dbg(VFS, "could not allocate crypto md5\n"); rc = PTR_ERR(server->secmech.md5); goto crypto_allocate_md5_fail; } server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); if (IS_ERR(server->secmech.hmacsha256)) { cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); rc = PTR_ERR(server->secmech.hmacsha256); goto crypto_allocate_hmacsha256_fail; } server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(server->secmech.cmacaes)) { cifs_dbg(VFS, "could not allocate crypto cmac-aes"); rc = PTR_ERR(server->secmech.cmacaes); goto crypto_allocate_cmacaes_fail; } size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.hmacmd5); server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdeschmacmd5) { rc = -ENOMEM; goto crypto_allocate_hmacmd5_sdesc_fail; } server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5; server->secmech.sdeschmacmd5->shash.flags = 0x0; size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.md5); server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdescmd5) { rc = -ENOMEM; goto crypto_allocate_md5_sdesc_fail; } server->secmech.sdescmd5->shash.tfm = server->secmech.md5; server->secmech.sdescmd5->shash.flags = 0x0; size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.hmacsha256); server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdeschmacsha256) { rc = -ENOMEM; goto crypto_allocate_hmacsha256_sdesc_fail; } server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256; 
server->secmech.sdeschmacsha256->shash.flags = 0x0; size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.cmacaes); server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdesccmacaes) { cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__); rc = -ENOMEM; goto crypto_allocate_cmacaes_sdesc_fail; } server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes; server->secmech.sdesccmacaes->shash.flags = 0x0; return 0; crypto_allocate_cmacaes_sdesc_fail: kfree(server->secmech.sdeschmacsha256); crypto_allocate_hmacsha256_sdesc_fail: kfree(server->secmech.sdescmd5); crypto_allocate_md5_sdesc_fail: kfree(server->secmech.sdeschmacmd5); crypto_allocate_hmacmd5_sdesc_fail: crypto_free_shash(server->secmech.cmacaes); crypto_allocate_cmacaes_fail: crypto_free_shash(server->secmech.hmacsha256); crypto_allocate_hmacsha256_fail: crypto_free_shash(server->secmech.md5); crypto_allocate_md5_fail: crypto_free_shash(server->secmech.hmacmd5); return rc; }
/*
 * Digest the relevant parts of the PKCS#7 data
 */
static int pkcs7_digest(struct pkcs7_message *pkcs7,
			struct pkcs7_signed_info *sinfo)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	size_t digest_size, desc_size;
	void *digest;
	int ret;

	kenter(",%u,%u", sinfo->index, sinfo->sig.pkey_hash_algo);

	/* Reject hash algorithm IDs we have no name (and thus no driver)
	 * for. */
	if (sinfo->sig.pkey_hash_algo >= PKEY_HASH__LAST ||
	    !hash_algo_name[sinfo->sig.pkey_hash_algo])
		return -ENOPKG;

	/* Allocate the hashing algorithm we're going to need and find out how
	 * big the hash operational data will be. */
	tfm = crypto_alloc_shash(hash_algo_name[sinfo->sig.pkey_hash_algo],
				 0, 0);
	if (IS_ERR(tfm))
		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	sinfo->sig.digest_size = digest_size = crypto_shash_digestsize(tfm);

	/* The digest buffer and the shash descriptor share a single
	 * allocation: the digest comes first and the descriptor starts at
	 * the next suitably aligned byte after it. */
	ret = -ENOMEM;
	digest = kzalloc(ALIGN(digest_size, __alignof__(*desc)) + desc_size,
			 GFP_KERNEL);
	if (!digest)
		goto error_no_desc;

	desc = PTR_ALIGN(digest + digest_size, __alignof__(*desc));
	desc->tfm   = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* Digest the message [RFC2315 9.3] */
	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto error;
	ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len, digest);
	if (ret < 0)
		goto error;
	pr_devel("MsgDigest = [%*ph]\n", 8, digest);

	/* However, if there are authenticated attributes, there must be a
	 * message digest attribute amongst them which corresponds to the
	 * digest we just calculated. */
	if (sinfo->authattrs) {
		u8 tag;

		if (!sinfo->msgdigest) {
			pr_warn("Sig %u: No messageDigest\n", sinfo->index);
			ret = -EKEYREJECTED;
			goto error;
		}

		if (sinfo->msgdigest_len != sinfo->sig.digest_size) {
			pr_debug("Sig %u: Invalid digest size (%u)\n",
				 sinfo->index, sinfo->msgdigest_len);
			ret = -EBADMSG;
			goto error;
		}

		if (memcmp(digest, sinfo->msgdigest,
			   sinfo->msgdigest_len) != 0) {
			pr_debug("Sig %u: Message digest doesn't match\n",
				 sinfo->index);
			ret = -EKEYREJECTED;
			goto error;
		}

		/* We then calculate anew, using the authenticated attributes
		 * as the contents of the digest instead.  Note that we need to
		 * convert the attributes from a CONT.0 into a SET before we
		 * hash it. */
		memset(digest, 0, sinfo->sig.digest_size);

		ret = crypto_shash_init(desc);
		if (ret < 0)
			goto error;
		tag = ASN1_CONS_BIT | ASN1_SET;
		ret = crypto_shash_update(desc, &tag, 1);
		if (ret < 0)
			goto error;
		ret = crypto_shash_finup(desc, sinfo->authattrs,
					 sinfo->authattrs_len, digest);
		if (ret < 0)
			goto error;
		pr_devel("AADigest = [%*ph]\n", 8, digest);
	}

	/* Ownership of the combined digest+descriptor buffer passes to the
	 * sinfo; NULLing the local pointer makes the kfree() below a no-op
	 * on this success path. */
	sinfo->sig.digest = digest;
	digest = NULL;

error:
	kfree(digest);
error_no_desc:
	crypto_free_shash(tfm);
	kleave(" = %d", ret);
	return ret;
}
/* Calculate and store the digest of segments */ static int kexec_calculate_store_digests(struct kimage *image) { struct crypto_shash *tfm; struct shash_desc *desc; int ret = 0, i, j, zero_buf_sz, sha_region_sz; size_t desc_size, nullsz; char *digest; void *zero_buf; struct kexec_sha_region *sha_regions; struct purgatory_info *pi = &image->purgatory_info; zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); zero_buf_sz = PAGE_SIZE; tfm = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); goto out; } desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); desc = kzalloc(desc_size, GFP_KERNEL); if (!desc) { ret = -ENOMEM; goto out_free_tfm; } sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); sha_regions = vzalloc(sha_region_sz); if (!sha_regions) goto out_free_desc; desc->tfm = tfm; desc->flags = 0; ret = crypto_shash_init(desc); if (ret < 0) goto out_free_sha_regions; digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); if (!digest) { ret = -ENOMEM; goto out_free_sha_regions; } for (j = i = 0; i < image->nr_segments; i++) { struct kexec_segment *ksegment; ksegment = &image->segment[i]; /* * Skip purgatory as it will be modified once we put digest * info in purgatory. */ if (ksegment->kbuf == pi->purgatory_buf) continue; ret = crypto_shash_update(desc, ksegment->kbuf, ksegment->bufsz); if (ret) break; /* * Assume rest of the buffer is filled with zero and * update digest accordingly. 
*/ nullsz = ksegment->memsz - ksegment->bufsz; while (nullsz) { unsigned long bytes = nullsz; if (bytes > zero_buf_sz) bytes = zero_buf_sz; ret = crypto_shash_update(desc, zero_buf, bytes); if (ret) break; nullsz -= bytes; } if (ret) break; sha_regions[j].start = ksegment->mem; sha_regions[j].len = ksegment->memsz; j++; } if (!ret) { ret = crypto_shash_final(desc, digest); if (ret) goto out_free_digest; ret = kexec_purgatory_get_set_symbol(image, "sha_regions", sha_regions, sha_region_sz, 0); if (ret) goto out_free_digest; ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", digest, SHA256_DIGEST_SIZE, 0); if (ret) goto out_free_digest; } out_free_digest: kfree(digest); out_free_sha_regions: vfree(sha_regions); out_free_desc: kfree(desc); out_free_tfm: kfree(tfm); out: return ret; }