/**
 * Calculate hash digest for the passed buffer.
 *
 * This should be used when computing the hash on a single contiguous buffer.
 * It combines the hash initialization, computation, and cleanup.
 *
 * \param[in]  hash_alg  id of hash algorithm (CFS_HASH_ALG_*)
 * \param[in]  buf       data buffer on which to compute hash
 * \param[in]  buf_len   length of \a buf in bytes
 * \param[in]  key       initial value/state for algorithm, if \a key = NULL
 *                       use default initial value
 * \param[in]  key_len   length of \a key in bytes
 * \param[out] hash      pointer to computed hash value, if \a hash = NULL then
 *                       \a hash_len is to digest size in bytes, retval -ENOSPC
 * \param[in,out] hash_len size of \a hash buffer
 *
 * \retval -EINVAL       \a buf, \a buf_len, \a hash_len, \a alg_id invalid
 * \retval -ENOENT       \a hash_alg is unsupported
 * \retval -ENOSPC       \a hash is NULL, or \a hash_len less than digest size
 * \retval 0 for success
 * \retval negative errno for other errors from lower layers.
 */
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
			   const void *buf, unsigned int buf_len,
			   unsigned char *key, unsigned int key_len,
			   unsigned char *hash, unsigned int *hash_len)
{
	struct scatterlist	sl;
	struct hash_desc	hdesc;
	int			err;
	const struct cfs_crypto_hash_type	*type;

	if (buf == NULL || buf_len == 0 || hash_len == NULL)
		return -EINVAL;

	/* Allocates the tfm and validates hash_alg; fills *type on success. */
	err = cfs_crypto_hash_alloc(hash_alg, &type, &hdesc, key, key_len);
	if (err != 0)
		return err;

	if (hash == NULL || *hash_len < type->cht_size) {
		/* Probe mode: report the required digest size to the caller
		 * and release the tfm allocated above. */
		*hash_len = type->cht_size;
		crypto_free_hash(hdesc.tfm);
		return -ENOSPC;
	}
	sg_init_one(&sl, (void *)buf, buf_len);

	hdesc.flags = 0;
	err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
	/* tfm is always released, whether the digest succeeded or not. */
	crypto_free_hash(hdesc.tfm);

	return err;
}
/**
 * Finalize an incremental hash computation started elsewhere on \a hdesc.
 *
 * If \a hash_len pointer is NULL - destroy descriptor (free the tfm and
 * the descriptor itself) without producing a digest.
 *
 * \param[in]     hdesc    opaque descriptor (really a struct hash_desc)
 * \param[out]    hash     buffer receiving the digest; may be NULL to probe
 * \param[in,out] hash_len size of \a hash; set to digest size on -ENOSPC
 *
 * \retval 0        success (or descriptor-destroy request)
 * \retval -ENOSPC  \a hash is NULL or \a *hash_len smaller than digest size;
 *                  descriptor is left intact so the caller can retry
 * \retval negative errno from crypto_hash_final(); descriptor left intact
 */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, unsigned char *hash, unsigned int *hash_len)
{
	int	err;
	int	size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);

	if (hash_len == NULL) {
		/* Caller only wants the descriptor destroyed. */
		crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
		kfree(hdesc);
		return 0;
	}
	if (hash == NULL || *hash_len < size) {
		/* Report required size; keep descriptor alive for a retry. */
		*hash_len = size;
		return -ENOSPC;
	}
	err = crypto_hash_final((struct hash_desc *) hdesc, hash);

	if (err < 0) {
		/* May be caller can fix error */
		return err;
	}
	crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
	kfree(hdesc);
	return err;
}
void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) crypto_free_hash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) crypto_free_hash(priv->rx_tfm_mic); }
/** * free resources used for digest calculation. * * digest_cleanup - * @conn: ptr to connection that made use of digests */ void digest_cleanup(struct iscsi_conn *conn) { if (conn->tx_hash.tfm) crypto_free_hash(conn->tx_hash.tfm); if (conn->rx_hash.tfm) crypto_free_hash(conn->rx_hash.tfm); }
/*
 * Compute an HMAC over @data using hash algorithm @algo keyed with @key.
 *
 * @result must be at least as large as the digest size of @algo
 * (@result_len is checked against it before anything is hashed).
 *
 * Returns WIFI_ENGINE_SUCCESS on success,
 * WIFI_ENGINE_FAILURE_INVALID_LENGTH when @result_len is too small, or
 * WIFI_ENGINE_FAILURE on any crypto-layer error.
 */
static int DriverEnvironment_HMAC(const char *algo, const void *key,
                                  size_t key_len, const void *data,
                                  size_t data_len, void *result,
                                  size_t result_len)
{
   struct crypto_hash *tfm;
   struct scatterlist sg[1];
   struct hash_desc desc;
   int ret;
   int status = WIFI_ENGINE_FAILURE;

   tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
   if(IS_ERR(tfm)) {
      DE_TRACE_INT(TR_CRYPTO, "failed to allocate hash (%ld)\n",
                   PTR_ERR(tfm));
      return WIFI_ENGINE_FAILURE;
   }

   /* Refuse to truncate the digest into a too-small output buffer. */
   if(crypto_hash_digestsize(tfm) > result_len) {
      status = WIFI_ENGINE_FAILURE_INVALID_LENGTH;
      goto out;
   }

   sg_init_one(&sg[0], data, data_len);
   crypto_hash_clear_flags(tfm, ~0);

   ret = crypto_hash_setkey(tfm, key, key_len);
   if(ret != 0) {
      DE_TRACE_INT(TR_CRYPTO, "failed to set key (%d)\n", ret);
      goto out;
   }

   desc.tfm = tfm;
   desc.flags = 0;
   ret = crypto_hash_digest(&desc, sg, data_len, result);
   if(ret != 0) {
      /* log message typo fixed: was "faild to digest" */
      DE_TRACE_INT(TR_CRYPTO, "failed to digest (%d)\n", ret);
      goto out;
   }

   status = WIFI_ENGINE_SUCCESS;
out:
   /* Single release point instead of a free in every error arm. */
   crypto_free_hash(tfm);
   return status;
}
/*
 * Compute the MD5 digest of the first @len bytes of @src into @dest.
 *
 * @dest must be at least 16 bytes (MD5 digest size).
 * Returns 0 on success or -EAGAIN on any crypto failure.
 */
static int generate_md5(char *src, char *dest, int len)
{
	struct scatterlist sg[1];
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int ret = 0;

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	/* Bug fix: the old code jumped to a common exit that called
	 * crypto_free_hash() on the ERR_PTR value. Return directly. */
	if (IS_ERR(tfm))
		return -EAGAIN;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(sg, 1);
	sg_set_buf(sg, src, len);

	/* Bug fix: nbytes was hard-coded to 1, so only the first byte of
	 * @src was ever hashed. Hash the full @len bytes. */
	if (crypto_hash_digest(&desc, sg, len, dest))
		ret = -EAGAIN;

	crypto_free_hash(tfm);
	return ret;
}
/* Transform teardown: release RSA state then the cached SHA-1 tfm. */
static void crypto_rsa_exit(struct crypto_tfm *tfm)
{
	struct crypto_rsa_ctx *rsa = crypto_tfm_ctx(tfm);

	cleanup_rsa(rsa);
	crypto_free_hash(rsa->crr_sha1_tfm);
}
__be32 nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) { struct xdr_netobj cksum; struct hash_desc desc; struct scatterlist sg; __be32 status = nfserr_resource; dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", clname->len, clname->data); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) goto out_no_tfm; cksum.len = crypto_hash_digestsize(desc.tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) goto out; sg_init_one(&sg, clname->data, clname->len); if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data)) goto out; md5_to_hex(dname, cksum.data); kfree(cksum.data); status = nfs_ok; out: crypto_free_hash(desc.tfm); out_no_tfm: return status; }
/*
 * Crypto machinery: hash/cipher support for the given crypto controls.
 *
 * Allocates the hash named by @ctl->hash_algo, records its digest size in
 * @ctl->crypto_attached_size and, for keyed hashes, installs @key.
 * Returns the tfm or an ERR_PTR on failure.
 */
static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
{
	struct crypto_hash *tfm;
	int err;

	tfm = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		err = PTR_ERR(tfm);
		dprintk("%s: failed to allocate hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		goto err_out_exit;
	}

	ctl->crypto_attached_size = crypto_hash_digestsize(tfm);

	/* Unkeyed hash: nothing left to configure. */
	if (!ctl->hash_keysize)
		return tfm;

	err = crypto_hash_setkey(tfm, key, ctl->hash_keysize);
	if (err) {
		dprintk("%s: failed to set key for hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		goto err_out_free;
	}

	return tfm;

err_out_free:
	crypto_free_hash(tfm);
err_out_exit:
	return ERR_PTR(err);
}
/*
 * Module init: smoke-test SHA-1 by hashing ten 'A' bytes and printing
 * the digest byte by byte. Returns 0 on success, negative errno if the
 * transform cannot be allocated.
 */
static int __init sha1_init(void)
{
	struct scatterlist sg;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	unsigned char output[SHA1_LENGTH];
	unsigned char buf[10];
	int i;

	printk(KERN_INFO "sha1: %s\n", __FUNCTION__);

	memset(buf, 'A', 10);
	memset(output, 0x00, SHA1_LENGTH);

	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	/* Bug fix: the allocation result was used unchecked; an ERR_PTR
	 * here would have been dereferenced below. */
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "sha1: failed to allocate tfm (%ld)\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, 10);
	crypto_hash_init(&desc);

	crypto_hash_update(&desc, &sg, 10);
	crypto_hash_final(&desc, output);

	/* Was a magic 20; use the digest-size constant instead. */
	for (i = 0; i < SHA1_LENGTH; i++) {
		printk(KERN_ERR "%d-%d\n", output[i], i);
	}

	crypto_free_hash(tfm);

	return 0;
}
// given a string, generate a 32-bit key int generate_key(char *pwd, u8 *pkey) { int len_pwd = strlen(pwd); struct scatterlist sg; struct crypto_hash *tfm; struct hash_desc desc; int i; unsigned char output[SHA1_LENGTH]; // key generated char *buf = kmalloc(MAX_PWD, GFP_KERNEL); // password buffer memset(buf, 0, MAX_PWD); strncpy(buf, pwd, len_pwd); tfm = crypto_alloc_hash("sha1", 1, CRYPTO_ALG_ASYNC); desc.tfm = tfm; desc.flags = 0; sg_init_one(&sg, buf, len_pwd); crypto_hash_init(&desc); crypto_hash_update(&desc, &sg, len_pwd); crypto_hash_final(&desc, output); for(i=0; i<16; i++) pkey[i] = output[i]; for(i=0; i<16; i++) pkey[i+16] = output[i]; crypto_free_hash(tfm); kfree(buf); return 0; }
/*
 * Allocate the hash transform configured in @psb (psb->hash_string),
 * record its digest size in psb->crypto_attached_size and install the
 * key when one is configured. Returns the tfm or an ERR_PTR.
 */
static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
{
	struct crypto_hash *tfm;
	int err;

	tfm = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		err = PTR_ERR(tfm);
		dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		goto err_out_exit;
	}

	psb->crypto_attached_size = crypto_hash_digestsize(tfm);

	/* No key configured: plain hash, done. */
	if (!psb->hash_keysize)
		return tfm;

	err = crypto_hash_setkey(tfm, psb->hash_key, psb->hash_keysize);
	if (err) {
		dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		goto err_out_free;
	}

	return tfm;

err_out_free:
	crypto_free_hash(tfm);
err_out_exit:
	return ERR_PTR(err);
}
/* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, int body_offset, struct xdr_netobj *cksum) { struct hash_desc desc; /* XXX add to ctx? */ struct scatterlist sg[1]; int err; desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) return GSS_S_FAILURE; cksum->len = crypto_hash_digestsize(desc.tfm); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; err = crypto_hash_init(&desc); if (err) goto out; sg_set_buf(sg, header, hdrlen); err = crypto_hash_update(&desc, sg, hdrlen); if (err) goto out; err = xdr_process_buf(body, body_offset, body->len - body_offset, checksummer, &desc); if (err) goto out; err = crypto_hash_final(&desc, cksum->data); out: crypto_free_hash(desc.tfm); return err ? GSS_S_FAILURE : 0; }
unsigned char * key_to_hash(unsigned char *key) { struct scatterlist sg; struct crypto_hash *tfm; struct hash_desc desc; unsigned char *digest= NULL; digest=kmalloc(16,GFP_KERNEL); if(IS_ERR(digest)){ printk("Error in allocating memory to Hash Key\n "); return NULL; } tfm = crypto_alloc_hash("md5", 0, 0); desc.tfm = tfm; desc.flags = 0; sg_init_one(&sg, key, 16); crypto_hash_init(&desc); crypto_hash_update(&desc, &sg, 16); crypto_hash_final(&desc, digest); crypto_free_hash(tfm); if(!digest){ printk("Error in hashing userland key\n"); return NULL; } return digest; }
/************************************************************************** * KERNEL SHA1 FUNCTION **************************************************************************/ unsigned int sbchk_sha1(char * code, unsigned int code_len, char* result) { unsigned int ret = SEC_OK; struct scatterlist sg[1]; struct crypto_hash *tfm = NULL; struct hash_desc desc; tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if(IS_ERR(tfm)) { ret = SBCHK_BASE_HASH_INIT_FAIL; goto _exit; } /* sg_init_one(&sg[0], plaintext, length); */ sg_set_buf(&sg[0], code, code_len); desc.tfm = tfm; desc.flags = 0; memset(result, 0, 20); /* SHA1 returns 20 bytes */ if (crypto_hash_digest(&desc, sg, code_len, result)) { ret = SBCHK_BASE_HASH_DATA_FAIL; goto _exit; } crypto_free_hash(tfm); _exit: return ret; }
void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e) { if (e->hash) crypto_free_hash(e->hash); if (e->cipher) crypto_free_ablkcipher(e->cipher); kfree(e->data); }
static void dst_crypto_engine_exit(struct dst_crypto_engine *e) { if (e->hash) crypto_free_hash(e->hash); if (e->cipher) crypto_free_ablkcipher(e->cipher); dst_crypto_pages_free(e); kfree(e->data); }
/*
 * Set up ESSIV IV generation for @cc: allocate the digest named by @opts,
 * a salt buffer of that digest's size, and the ESSIV cipher matching
 * cc->cipher. On success ownership of all three moves into
 * cc->iv_gen_private.essiv; on failure everything allocated so far is
 * released. Returns 0 or a negative errno (ti->error describes the cause).
 */
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	/* Salt is one digest wide; zeroed until the key is set. */
	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	/* ESSIV encrypts the sector number into an IV, so the cipher's
	 * block size must equal the data cipher's IV size. */
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	/* Locals may be NULL or ERR_PTR depending on how far we got. */
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
/* Transform teardown: drop the bounce page and the software fallback tfm. */
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	void *page = ctx(tfm)->data;

	if (page != NULL) {
		free_page((unsigned long)page);
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}
/* get_key_hash * * @enc_key: key of which the md5 hash has to be generated * * Returns the md5 hash of the key. Responsibility of freeing the hashed key lies with the caller who requested the hashed key. */ unsigned char *get_key_hash(unsigned char *enc_key) { /* imp, plaintext should be array else getting sefault so copy key in array here */ struct scatterlist sg; struct hash_desc desc; int i, err; unsigned char *hashed_key; unsigned char plaintext[AES_KEY_SIZE]; for (i = 0; i < AES_KEY_SIZE; i++) plaintext[i] = enc_key[i]; hashed_key = kmalloc(sizeof(char)*AES_KEY_SIZE, GFP_KERNEL); desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) { err = PTR_ERR(desc.tfm); printk(KERN_ALERT"error in allocating hash"); goto ERR; } desc.flags = 0; sg_init_one(&sg, plaintext, AES_KEY_SIZE); err = crypto_hash_init(&desc); if (err) { printk(KERN_ALERT"error in initializing crypto hash\n"); goto ERR; } err = crypto_hash_update(&desc, &sg, AES_KEY_SIZE); if (err) { printk(KERN_ALERT"error in updating crypto hash\n"); goto ERR; } printk(KERN_ALERT"cry[to hash updated\n"); err = crypto_hash_final(&desc, hashed_key); if (err) { printk(KERN_ALERT"error in finalizing crypto hash\n"); goto ERR; } crypto_free_hash(desc.tfm); return hashed_key; ERR: if (desc.tfm) crypto_free_hash(desc.tfm); return ERR_PTR(err); }
void do_integrity_check(void) { u8 *rbuf = (u8 *) ZIMAGE_ADDR; u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8 *key = "12345678"; printk(KERN_INFO "FIPS: do kernel integrity check\n"); if (unlikely(!need_integrity_check || in_fips_err())) return; if (*((u32 *) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "FIPS: invalid zImage magic number."); set_in_fips_err(); goto err1; } if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) { printk(KERN_ERR "FIPS: invalid zImage calculated len"); set_in_fips_err(); goto err1; } len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40]; desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n", PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm, key, strlen(key)); crypto_hash_digest(&desc, &sg, len, hmac); if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) { printk(KERN_INFO "FIPS: integrity check passed\n"); } else { printk(KERN_ERR "FIPS: integrity check failed\n"); set_in_fips_err(); } err: crypto_free_hash(desc.tfm); err1: need_integrity_check = false; return; }
/*
 * Destroy a SIW queue pair: force it into ERROR state, detach its
 * connection endpoint, release CRC transforms and drop all object
 * references. Always returns 0.
 */
int siw_destroy_qp(struct ib_qp *ofa_qp)
{
	struct siw_qp		*qp = siw_qp_ofa2siw(ofa_qp);
	struct siw_qp_attrs	qp_attrs;

	dprint(DBG_CM, "(QP%d): SIW QP state=%d, cep=0x%p\n",
		QP_ID(qp), qp->attrs.state, qp->cep);

	/*
	 * Mark QP as in process of destruction to prevent from eventual async
	 * callbacks to OFA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_ctx.rx_suspend = 1;

	/* State transition and cep teardown happen under the state lock. */
	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	(void)siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);

	/* CRC transforms exist only when CRC was negotiated per direction. */
	if (qp->rx_ctx.crc_enabled)
		crypto_free_hash(qp->rx_ctx.mpa_crc_hd.tfm);
	if (qp->tx_ctx.crc_enabled)
		crypto_free_hash(qp->tx_ctx.mpa_crc_hd.tfm);

	/* Drop references */
	siw_cq_put(qp->scq);
	siw_cq_put(qp->rcq);
	siw_pd_put(qp->pd);
	qp->scq = qp->rcq = NULL;

	/* Final reference; qp memory may be freed after this. */
	siw_qp_put(qp);

	return 0;
}
static void ah_destroy(struct xfrm_state *x) { struct ah_data *ahp = x->data; if (!ahp) return; kfree(ahp->work_icv); crypto_free_hash(ahp->tfm); kfree(ahp); }
/*
 * Look up the hash type for @alg_id, allocate its tfm into @desc, install
 * the key (caller's @key, or the type's built-in default key if any) and
 * run crypto_hash_init(). On success *type describes the algorithm.
 * Returns 0 or a negative errno; on failure no tfm is left allocated.
 */
static int cfs_crypto_hash_alloc(unsigned char alg_id,
				 const struct cfs_crypto_hash_type **type,
				 struct hash_desc *desc, unsigned char *key,
				 unsigned int key_len)
{
	int     err = 0;

	*type = cfs_crypto_hash_type(alg_id);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      alg_id, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);

	/* NOTE(review): crypto_alloc_hash() returns an ERR_PTR, never NULL;
	 * this extra NULL check looks redundant but is kept as-is. */
	if (desc->tfm == NULL)
		return -EINVAL;

	if (IS_ERR(desc->tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(desc->tfm);
	}

	desc->flags = 0;

	/** Shash have different logic for initialization then digest
	 * shash: crypto_hash_setkey, crypto_hash_init
	 * digest: crypto_digest_init, crypto_digest_setkey
	 * Skip this function for digest, because we use shash logic at
	 * cfs_crypto_hash_alloc.
	 */
	if (key != NULL) {
		err = crypto_hash_setkey(desc->tfm, key, key_len);
	} else if ((*type)->cht_key != 0) {
		/* Algorithm defines its own default key (e.g. seeded CRC). */
		err = crypto_hash_setkey(desc->tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);
	}

	if (err != 0) {
		crypto_free_hash(desc->tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
	       cfs_crypto_hash_speeds[alg_id]);

	return crypto_hash_init(desc);
}
/* Tear down ESSIV IV-generation state: cipher, digest and salt. Pointers
 * are cleared so a second call is harmless. */
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	/* Salt is key material; kzfree() wipes it before freeing. */
	kzfree(essiv->salt);
	essiv->salt = NULL;
}
/*
 * Verify a password: compute sha256(salt || entry->pw) and compare it
 * against @sum. Returns 0 on match, 1 on mismatch or failure.
 *
 * The comparison deliberately scans the full digest and takes a balanced
 * branch (the volatile `dummy` write) on every byte, so its timing does
 * not leak the position of the first mismatch. The plaintext password is
 * wiped from @entry before the result is computed.
 */
int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg[2];
	unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
	unsigned long *tmpsumptr = (unsigned long *)temp_sum;
	unsigned long *sumptr = (unsigned long *)sum;
	int cryptres;
	int retval = 1;
	volatile int mismatched = 0;
	volatile int dummy = 0;
	unsigned int i;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		memset(entry->pw, 0, GR_PW_LEN);
		return 1;
	}

	/* Hash salt and password as one two-entry scatterlist. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], salt, GR_SALT_LEN);
	sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));

	desc.tfm = tfm;
	desc.flags = 0;

	cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
				      temp_sum);

	/* Wipe the plaintext password regardless of the digest outcome. */
	memset(entry->pw, 0, GR_PW_LEN);

	if (cryptres)
		goto out;

	/* Word-wise, full-length compare with a balanced else branch. */
	for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
		if (sumptr[i] != tmpsumptr[i])
			mismatched = 1;
		else
			dummy = 1;	// waste a cycle

	if (!mismatched)
		retval = dummy - 1;

out:
	crypto_free_hash(tfm);

	return retval;
}
void do_integrity_check(void) { u8* rbuf=__va(ZIMAGE_START); u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8* key="12345678"; printk(KERN_INFO "do kernel integrity check\n"); if (integrity_checked || in_fips_err()) return; if ( *((u32*) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "integ: invalid zImage magic number."); set_in_fips_err(); goto err; } len = *(u32*)&rbuf[44] - *(u32*)&rbuf[40]; if (len < 0) { printk(KERN_ERR "integ: invalid zImage calculated len"); set_in_fips_err(); goto err; } desc.tfm = crypto_alloc_hash("hmac(sha256)",0,0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "integ: failed to allocate tfm %ld\n",PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm,key,strlen(key)); crypto_hash_digest(&desc,&sg,len,hmac); if (!strncmp(hmac,&rbuf[len],SHA256_DIGEST_SIZE)) { printk(KERN_INFO "integrity check passed"); } else { printk(KERN_ERR "integrity check failed"); set_in_fips_err(); } err: integrity_checked=true; crypto_free_hash(desc.tfm); return; }
/* Allocate the @checksum_algorithm transform into @desc and initialise a
 * hash computation. Returns 0 on success or a negative errno; on failure
 * no tfm is left allocated. */
int crypt_init_desc(struct hash_desc *desc, char *checksum_algorithm)
{
	int rc;

	desc->tfm = crypto_alloc_hash(checksum_algorithm, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc->tfm)) {
		rc = PTR_ERR(desc->tfm);
		pr_info("failed to load %s transform: %ld\n",
			checksum_algorithm, PTR_ERR(desc->tfm));
		return rc;
	}

	desc->flags = 0;
	rc = crypto_hash_init(desc);
	if (rc)
		crypto_free_hash(desc->tfm);
	return rc;
}
/*
 * Verify a password: stream salt then entry->pw through sha256 and
 * compare the digest against @sum. Returns 0 on match, 1 on mismatch
 * or failure.
 *
 * The byte-wise compare deliberately scans the full digest and takes a
 * balanced branch (the volatile `dummy` write) on every byte so its
 * timing does not leak the position of the first mismatch. The plaintext
 * password is wiped from @entry before the result is returned.
 */
int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	char *p;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned char temp_sum[GR_SHA_LEN];
	volatile int retval = 0;
	volatile int dummy = 0;
	unsigned int i;

	sg_init_table(&sg, 1);

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		return 1;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	crypto_hash_init(&desc);

	/* Feed the salt first... */
	p = salt;
	sg_set_buf(&sg, p, GR_SALT_LEN);
	crypto_hash_update(&desc, &sg, sg.length);

	/* ...then the password. */
	p = entry->pw;
	sg_set_buf(&sg, p, strlen(p));
	crypto_hash_update(&desc, &sg, sg.length);

	crypto_hash_final(&desc, temp_sum);

	/* Wipe the plaintext password before comparing. */
	memset(entry->pw, 0, GR_PW_LEN);

	for (i = 0; i < GR_SHA_LEN; i++)
		if (sum[i] != temp_sum[i])
			retval = 1;
		else
			dummy = 1;	// waste a cycle

	crypto_free_hash(tfm);

	return retval;
}
static char *calc_hmac(char *plain_text, unsigned int plain_text_size, char *key, unsigned int key_size) { struct scatterlist sg; char *result; struct crypto_hash *tfm; struct hash_desc desc; int ret; tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "failed to load transform for hmac(sha1): %ld\n", PTR_ERR(tfm)); return NULL; } desc.tfm = tfm; desc.flags = 0; result = kzalloc(TOSLSM_DIGEST_SIZE, GFP_KERNEL); if (!result) { printk(KERN_ERR "out of memory!\n"); goto out; } sg_set_buf(&sg, plain_text, plain_text_size); ret = crypto_hash_setkey(tfm, key, key_size); if (ret) { printk(KERN_ERR "setkey() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } ret = crypto_hash_digest(&desc, &sg, plain_text_size, result); if (ret) { printk(KERN_ERR "digest() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } out: crypto_free_hash(tfm); return result; }