/*
 * Hash ServerKeyExchange parameters for TLS <= 1.1 signature verification:
 * MD5(client_random | server_random | ServerParams) followed by
 * SHA-1(client_random | server_random | ServerParams) written into hash.
 * Returns the total number of hash bytes written, or -1 on failure.
 */
int tls_key_x_server_params_hash(u16 tls_version, const u8 *client_random,
				 const u8 *server_random,
				 const u8 *server_params,
				 size_t server_params_len, u8 *hash)
{
	u8 *hpos;
	size_t hlen;
	struct crypto_hash *ctx;

	hpos = hash;

	/* MD5 part */
	ctx = crypto_hash_init(CRYPTO_HASH_ALG_MD5, NULL, 0);
	if (ctx == NULL)
		return -1;
	crypto_hash_update(ctx, client_random, TLS_RANDOM_LEN);
	crypto_hash_update(ctx, server_random, TLS_RANDOM_LEN);
	crypto_hash_update(ctx, server_params, server_params_len);
	hlen = MD5_MAC_LEN;
	if (crypto_hash_finish(ctx, hash, &hlen) < 0)
		return -1;
	hpos += hlen;

	/* SHA-1 part */
	ctx = crypto_hash_init(CRYPTO_HASH_ALG_SHA1, NULL, 0);
	if (ctx == NULL)
		return -1;
	crypto_hash_update(ctx, client_random, TLS_RANDOM_LEN);
	crypto_hash_update(ctx, server_random, TLS_RANDOM_LEN);
	crypto_hash_update(ctx, server_params, server_params_len);
	/*
	 * BUG FIX: the original computed hash + sizeof(hash) - hpos, but
	 * sizeof(hash) on a u8 * parameter is the size of the pointer, not
	 * of the caller's buffer, so the subtraction underflowed to a huge
	 * size_t. The space remaining after the MD5 output is exactly the
	 * SHA-1 digest length.
	 */
	hlen = SHA1_MAC_LEN;
	if (crypto_hash_finish(ctx, hpos, &hlen) < 0)
		return -1;
	hpos += hlen;

	return hpos - hash;
}
int tls_verify_hash_init(struct tls_verify_hash *verify) { tls_verify_hash_free(verify); verify->md5_client = crypto_hash_init(CRYPTO_HASH_ALG_MD5, NULL, 0); verify->md5_server = crypto_hash_init(CRYPTO_HASH_ALG_MD5, NULL, 0); verify->md5_cert = crypto_hash_init(CRYPTO_HASH_ALG_MD5, NULL, 0); verify->sha1_client = crypto_hash_init(CRYPTO_HASH_ALG_SHA1, NULL, 0); verify->sha1_server = crypto_hash_init(CRYPTO_HASH_ALG_SHA1, NULL, 0); verify->sha1_cert = crypto_hash_init(CRYPTO_HASH_ALG_SHA1, NULL, 0); if (verify->md5_client == NULL || verify->md5_server == NULL || verify->md5_cert == NULL || verify->sha1_client == NULL || verify->sha1_server == NULL || verify->sha1_cert == NULL) { tls_verify_hash_free(verify); return -1; } #ifdef CONFIG_TLSV12 verify->sha256_client = crypto_hash_init(CRYPTO_HASH_ALG_SHA256, NULL, 0); verify->sha256_server = crypto_hash_init(CRYPTO_HASH_ALG_SHA256, NULL, 0); verify->sha256_cert = crypto_hash_init(CRYPTO_HASH_ALG_SHA256, NULL, 0); if (verify->sha256_client == NULL || verify->sha256_server == NULL || verify->sha256_cert == NULL) { tls_verify_hash_free(verify); return -1; } #endif /* CONFIG_TLSV12 */ return 0; }
static int init_hash (struct hash_desc * desc) { struct crypto_hash * tfm = NULL; int ret = -1; /* Same as build time */ const unsigned char * key = "The quick brown fox jumps over the lazy dog"; tfm = crypto_alloc_hash ("hmac(sha256)", 0, 0); if (IS_ERR(tfm)) { printk(KERN_ERR "FIPS(%s): integ failed to allocate tfm %ld", __FUNCTION__, PTR_ERR(tfm)); return -1; } ret = crypto_hash_setkey (tfm, key, strlen(key)); if (ret) { printk(KERN_ERR "FIPS(%s): fail at crypto_hash_setkey", __FUNCTION__); return -1; } desc->tfm = tfm; desc->flags = 0; ret = crypto_hash_init (desc); if (ret) { printk(KERN_ERR "FIPS(%s): fail at crypto_hash_init", __FUNCTION__); return -1; } return 0; }
/*
 * Module init: hash ten 'A' bytes with SHA-1 and dump the 20 digest
 * bytes to the kernel log, as a smoke test of the crypto hash API.
 */
static int __init sha1_init(void)
{
	struct scatterlist sg;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	unsigned char output[SHA1_LENGTH];
	unsigned char buf[10];
	int i;

	printk(KERN_INFO "sha1: %s\n", __FUNCTION__);

	memset(buf, 'A', 10);
	memset(output, 0x00, SHA1_LENGTH);

	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	/* BUG FIX: crypto_alloc_hash() returns an ERR_PTR on failure; the
	 * original dereferenced it unconditionally via desc.tfm. */
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "sha1: failed to allocate transform: %ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, 10);
	crypto_hash_init(&desc);
	crypto_hash_update(&desc, &sg, 10);
	crypto_hash_final(&desc, output);

	/* SHA-1 digests are 20 bytes; print each byte with its index. */
	for (i = 0; i < 20; i++) {
		printk(KERN_ERR "%d-%d\n", output[i], i);
	}

	crypto_free_hash(tfm);
	return 0;
}
unsigned char * key_to_hash(unsigned char *key) { struct scatterlist sg; struct crypto_hash *tfm; struct hash_desc desc; unsigned char *digest= NULL; digest=kmalloc(16,GFP_KERNEL); if(IS_ERR(digest)){ printk("Error in allocating memory to Hash Key\n "); return NULL; } tfm = crypto_alloc_hash("md5", 0, 0); desc.tfm = tfm; desc.flags = 0; sg_init_one(&sg, key, 16); crypto_hash_init(&desc); crypto_hash_update(&desc, &sg, 16); crypto_hash_final(&desc, digest); crypto_free_hash(tfm); if(!digest){ printk("Error in hashing userland key\n"); return NULL; } return digest; }
/* The random function H(x) = HMAC-SHA256(0^32, x) */
struct crypto_hash * eap_pwd_h_init(void)
{
	u8 zero_key[SHA256_MAC_LEN];

	/* Key the HMAC with 32 zero bytes, as required for H(). */
	os_memset(zero_key, 0, sizeof(zero_key));
	return crypto_hash_init(CRYPTO_HASH_ALG_HMAC_SHA256, zero_key,
				sizeof(zero_key));
}
/*
 * One-shot digest helper: allocate a context for the given algorithm,
 * hash datalen bytes of data (possibly zero), and write the result into
 * digest. The context is always freed before returning.
 */
TEE_Result tee_hash_createdigest(uint32_t algo, const uint8_t *data,
				 size_t datalen, uint8_t *digest,
				 size_t digestlen)
{
	void *ctx = NULL;
	TEE_Result res = crypto_hash_alloc_ctx(&ctx, algo);

	if (res)
		return res;

	res = crypto_hash_init(ctx, algo);
	if (res)
		goto out;

	/* An empty message is valid: skip the update and finalize. */
	if (datalen) {
		res = crypto_hash_update(ctx, algo, data, datalen);
		if (res)
			goto out;
	}

	res = crypto_hash_final(ctx, algo, digest, digestlen);
out:
	crypto_hash_free_ctx(ctx, algo);
	return res;
}
// given a string, generate a 32-bit key int generate_key(char *pwd, u8 *pkey) { int len_pwd = strlen(pwd); struct scatterlist sg; struct crypto_hash *tfm; struct hash_desc desc; int i; unsigned char output[SHA1_LENGTH]; // key generated char *buf = kmalloc(MAX_PWD, GFP_KERNEL); // password buffer memset(buf, 0, MAX_PWD); strncpy(buf, pwd, len_pwd); tfm = crypto_alloc_hash("sha1", 1, CRYPTO_ALG_ASYNC); desc.tfm = tfm; desc.flags = 0; sg_init_one(&sg, buf, len_pwd); crypto_hash_init(&desc); crypto_hash_update(&desc, &sg, len_pwd); crypto_hash_final(&desc, output); for(i=0; i<16; i++) pkey[i] = output[i]; for(i=0; i<16; i++) pkey[i+16] = output[i]; crypto_free_hash(tfm); kfree(buf); return 0; }
static int pohmelfs_hash(struct pohmelfs_crypto_thread *tc) { struct pohmelfs_crypto_engine *e = &tc->eng; struct hash_desc *desc = e->data; unsigned char *dst = tc->trans->iovec.iov_base + sizeof(struct netfs_cmd); int err; desc->tfm = e->hash; desc->flags = 0; err = crypto_hash_init(desc); if (err) return err; err = pohmelfs_trans_iter(tc->trans, e, pohmelfs_hash_iterator); if (err) return err; err = crypto_hash_final(desc, dst); if (err) return err; { unsigned int i; dprintk("%s: ", __func__); for (i = 0; i < tc->psb->crypto_attached_size; ++i) dprintka("%02x ", dst[i]); dprintka("\n"); } return 0; }
/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_checksum(char *cksumname, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset,
	      struct xdr_netobj *cksum)
{
	struct hash_desc desc; /* XXX add to ctx? */
	struct scatterlist sg[1];
	int err;

	/* Allocate a synchronous transform for the named algorithm; any
	 * internal error is collapsed into GSS_S_FAILURE for callers. */
	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	/* Record how many bytes of cksum->data will be valid. */
	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	/* First hash the raw token header bytes... */
	sg_set_buf(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	/* ...then walk the xdr_buf payload from body_offset onward,
	 * feeding each fragment through checksummer(). */
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, cksum->data);
out:
	/* The transform is freed on every path, success or failure. */
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
/* Initialize the hash descriptor passed as an opaque pointer; log (but
 * otherwise ignore) an initialization failure. */
void sha1_init(void *c)
{
	struct hash_desc *desc = c;

	if (crypto_hash_init(desc))
		printk(KERN_INFO "crypto_hash_init()\n");
}
/*
 * Hash all LBR (last branch record) from/to pairs, newest first, into
 * hash. Always returns 0.
 */
int hash_lbr(uint8_t hash[DIGEST_LENGTH], struct lbr_t *lbr)
{
	struct scatterlist sg;
	int i, j;

	/* No error checking here. If anything fails, we better go straight
	 * home anyway. */
	crypto_hash_init(&armor_desc);
	armor_desc.flags = 0;

	/* Loop over all LBR entries. */
	for (i = 0; i < LBR_ENTRIES; i++) {
		/*
		 * BUG FIX: index with (tos + LBR_ENTRIES - i), matching the
		 * debug print below. The original used (tos - i) %
		 * LBR_ENTRIES for the hashed data, which goes negative (or
		 * wraps) once i exceeds tos and indexes outside the arrays.
		 */
		int idx = (lbr->tos + LBR_ENTRIES - i) % LBR_ENTRIES;

		sg_set_buf(&sg, &lbr->from[idx], sizeof(uint64_t));
		crypto_hash_update(&armor_desc, &sg, sizeof(uint64_t));
		sg_set_buf(&sg, &lbr->to[idx], sizeof(uint64_t));
		crypto_hash_update(&armor_desc, &sg, sizeof(uint64_t));
		printdj(false, "lbr[%2d], <from: 0x%012llx, to: 0x%012llx>\n",
			i, lbr->from[idx], lbr->to[idx]);
	}
	ARMOR_STAT_INC(digests);
	crypto_hash_final(&armor_desc, hash);

	printdj(false, "hash: ");
	for (j = 0; j < DIGEST_LENGTH; j++)
		printdj(false, "%02x", hash[j]);
	printdj(false, "\n");

	return 0;
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, const struct in6_addr *daddr, struct in6_addr *saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; struct hash_desc *desc; hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; desc = &hp->md5_desc; if (crypto_hash_init(desc)) goto clear_hash; if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_header(hp, th)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; if (crypto_hash_final(desc, md5_hash)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; }
/*
 * Concatenation KDF (cf. NIST SP 800-56A): derive derived_key_len bytes
 * by hashing big-endian counter || shared_secret || OtherInfo for
 * successive counter values, concatenating the digests.
 */
TEE_Result tee_cryp_concat_kdf(uint32_t hash_id, const uint8_t *shared_secret,
			       size_t shared_secret_len,
			       const uint8_t *other_info,
			       size_t other_info_len, uint8_t *derived_key,
			       size_t derived_key_len)
{
	TEE_Result res;
	size_t hash_len, i, n, sz;
	void *ctx = NULL;
	uint8_t tmp[TEE_MAX_HASH_SIZE];
	uint32_t be_count;
	uint8_t *out = derived_key;
	uint32_t hash_algo = TEE_ALG_HASH_ALGO(hash_id);

	res = crypto_hash_alloc_ctx(&ctx, hash_algo);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_hash_get_digest_size(hash_algo, &hash_len);
	if (res != TEE_SUCCESS)
		goto out;

	/* n full digests plus one final (possibly partial) digest; sz is
	 * how many bytes of each digest are copied out. Note that when
	 * derived_key_len is an exact multiple of hash_len, the last
	 * round computes a digest and copies 0 bytes of it. */
	n = derived_key_len / hash_len;
	sz = hash_len;
	for (i = 1; i <= n + 1; i++) {
		/* Counter is hashed in big-endian byte order. */
		be_count = TEE_U32_TO_BIG_ENDIAN(i);

		res = crypto_hash_init(ctx, hash_algo);
		if (res != TEE_SUCCESS)
			goto out;
		res = crypto_hash_update(ctx, hash_algo, (uint8_t *)&be_count,
					 sizeof(be_count));
		if (res != TEE_SUCCESS)
			goto out;
		res = crypto_hash_update(ctx, hash_algo, shared_secret,
					 shared_secret_len);
		if (res != TEE_SUCCESS)
			goto out;
		/* OtherInfo is optional. */
		if (other_info && other_info_len) {
			res = crypto_hash_update(ctx, hash_algo, other_info,
						 other_info_len);
			if (res != TEE_SUCCESS)
				goto out;
		}
		res = crypto_hash_final(ctx, hash_algo, tmp, sizeof(tmp));
		if (res != TEE_SUCCESS)
			goto out;

		/* Final round: copy only the remainder that still fits. */
		if (i == n + 1)
			sz = derived_key_len % hash_len;
		memcpy(out, tmp, sz);
		out += sz;
	}
	res = TEE_SUCCESS;
out:
	crypto_hash_free_ctx(ctx, hash_algo);
	return res;
}
/*
 * Allocate and initialize a hash descriptor for the given algorithm id,
 * optionally keying it (caller key, or the algorithm's default key from
 * the type table). Returns 0 on success or a negative errno.
 */
static int cfs_crypto_hash_alloc(unsigned char alg_id,
				 const struct cfs_crypto_hash_type **type,
				 struct hash_desc *desc, unsigned char *key,
				 unsigned int key_len)
{
	int err = 0;

	/* Map the algorithm id to its descriptor (name, key, sizes). */
	*type = cfs_crypto_hash_type(alg_id);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      alg_id, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);

	/* NOTE(review): crypto_alloc_hash() normally reports failure via
	 * ERR_PTR, so this NULL check is likely dead — confirm against the
	 * kernel version this builds with. */
	if (desc->tfm == NULL)
		return -EINVAL;

	if (IS_ERR(desc->tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(desc->tfm);
	}

	desc->flags = 0;

	/** Shash have different logic for initialization then digest
	 * shash: crypto_hash_setkey, crypto_hash_init
	 * digest: crypto_digest_init, crypto_digest_setkey
	 * Skip this function for digest, because we use shash logic at
	 * cfs_crypto_hash_alloc.
	 */
	if (key != NULL) {
		/* Caller-supplied key (e.g. for HMAC-style use). */
		err = crypto_hash_setkey(desc->tfm, key, key_len);
	} else if ((*type)->cht_key != 0) {
		/* Algorithm-default key from the type table. */
		err = crypto_hash_setkey(desc->tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);
	}

	if (err != 0) {
		/* Keying failed: release the transform before returning. */
		crypto_free_hash(desc->tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
	       cfs_crypto_hash_speeds[alg_id]);

	return crypto_hash_init(desc);
}
/*
 * Helper function to set up segment buffer
 */
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
		     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
{
	memset(segment, 0, sizeof(*segment));
	segment->total_size = size;
	segment->done = done;

	/* A digest context is optional; when supplied, attach and reset it. */
	if (!hash)
		return;
	segment->hash = hash;
	crypto_hash_init(hash);
}
/*
 * Compute the TCP-MD5 digest for an IPv6 skb: pseudo-header, TCP header,
 * payload, then the key. Returns 0 on success; on failure the digest is
 * zeroed and 1 is returned.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	/* Pick the address pair: established socket, pending request
	 * socket, or (for unattached segments) the packet itself. */
	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	/* Per-CPU pool; must be put back on every exit path below. */
	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	/* th->doff << 2 is the TCP header length in bytes. */
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* Failure: hand back an all-zero digest and report error (1). */
	memset(md5_hash, 0, 16);
	return 1;
}
/*
 * Allocate the named checksum transform into desc and initialize it.
 * Returns 0 on success or a negative errno; on init failure the transform
 * is freed again so nothing leaks.
 */
int crypt_init_desc(struct hash_desc *desc, char *checksum_algorithm)
{
	int rc = 0;

	desc->tfm = crypto_alloc_hash(checksum_algorithm, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc->tfm)) {
		pr_info("failed to load %s transform: %ld\n",
			checksum_algorithm, PTR_ERR(desc->tfm));
		return PTR_ERR(desc->tfm);
	}
	desc->flags = 0;

	rc = crypto_hash_init(desc);
	if (rc)
		crypto_free_hash(desc->tfm);
	return rc;
}
/*
 * Verify a password: compare SHA-256(salt || entry->pw) against the
 * expected digest in sum. Returns 0 on match, 1 on mismatch or if the
 * transform cannot be allocated. The plaintext password is wiped.
 */
int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	char *p;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned char temp_sum[GR_SHA_LEN];
	/* volatile so the compiler cannot short-circuit or elide the
	 * byte-wise comparison loop below. */
	volatile int retval = 0;
	volatile int dummy = 0;
	unsigned int i;

	sg_init_table(&sg, 1);

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		return 1;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	crypto_hash_init(&desc);

	/* temp_sum = SHA-256(salt || password) */
	p = salt;
	sg_set_buf(&sg, p, GR_SALT_LEN);
	crypto_hash_update(&desc, &sg, sg.length);

	p = entry->pw;
	sg_set_buf(&sg, p, strlen(p));
	crypto_hash_update(&desc, &sg, sg.length);

	crypto_hash_final(&desc, temp_sum);

	/* Wipe the plaintext password as soon as it has been hashed. */
	memset(entry->pw, 0, GR_PW_LEN);

	/* Compare every byte regardless of earlier mismatches, so timing
	 * does not reveal where the first difference occurs. */
	for (i = 0; i < GR_SHA_LEN; i++)
		if (sum[i] != temp_sum[i])
			retval = 1;
		else
			dummy = 1; // waste a cycle

	crypto_free_hash(tfm);

	return retval;
}
/*
 * Calculates block's hash value, to avoid all block compare.
 * hash_out must be allocated outside.
 * Returns 0 on success or a negative errno on allocation failure.
 */
int calc_hash(char *data, size_t size, u8 *hash_out)
{
	struct hash_desc sha256_desc;
	struct scatterlist sg;

	sha256_desc.tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	/* BUG FIX: crypto_alloc_hash() returns an ERR_PTR on failure; the
	 * original used it unconditionally. */
	if (IS_ERR(sha256_desc.tfm))
		return PTR_ERR(sha256_desc.tfm);
	/* BUG FIX: flags was left uninitialized (stack garbage). */
	sha256_desc.flags = 0;

	sg_init_one(&sg, data, size);
	crypto_hash_init(&sha256_desc);
	crypto_hash_update(&sha256_desc, &sg, size);
	crypto_hash_final(&sha256_desc, hash_out);
	crypto_free_hash(sha256_desc.tfm);
	return 0;
}
/*
 * Switch this tfm from the PadLock hardware path to the software
 * fallback: initialize the fallback hash and replay any buffered partial
 * data into it, then mark the context as bypassed. Idempotent.
 */
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);
	/* Replay bytes that were accumulated before the bypass. */
	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	/* Buffer drained; all further updates go to the fallback. */
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a hdesc for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks. The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type pointer to the hash description in hash_types[] array
 * \param[in,out] hdesc hash state descriptor to be initialized
 * \param[in] key initial hash value/state, NULL to use default value
 * \param[in] key_len length of \a key
 *
 * \retval 0 on success
 * \retval negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct hash_desc *hdesc, unsigned char *key,
				 unsigned int key_len)
{
	int err = 0;

	/* Map the algorithm id to its descriptor (name, key, sizes). */
	*type = cfs_crypto_hash_type(hash_alg);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	hdesc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);

	/* NOTE(review): crypto_alloc_hash() normally reports failure via
	 * ERR_PTR, so this NULL check may be dead — confirm against the
	 * kernel version this builds with. */
	if (hdesc->tfm == NULL)
		return -EINVAL;

	if (IS_ERR(hdesc->tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(hdesc->tfm);
	}

	hdesc->flags = 0;

	/* Key with the caller's key if given, otherwise with the
	 * algorithm's default key from the type table (if any). */
	if (key != NULL)
		err = crypto_hash_setkey(hdesc->tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_hash_setkey(hdesc->tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);

	if (err != 0) {
		/* Keying failed: release the transform before returning. */
		crypto_free_hash(hdesc->tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       (crypto_hash_tfm(hdesc->tfm))->__crt_alg->cra_name,
	       (crypto_hash_tfm(hdesc->tfm))->__crt_alg->cra_driver_name,
	       cfs_crypto_hash_speeds[hash_alg]);

	return crypto_hash_init(hdesc);
}
/*
 * Allocate the configured IMA hash transform into desc and initialize it.
 * Returns 0 on success or a negative errno; the transform is freed again
 * if initialization fails, so nothing leaks.
 */
static int init_desc(struct hash_desc *desc)
{
	int rc = 0;

	desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc->tfm)) {
		pr_info("failed to load %s transform: %ld\n",
			ima_hash, PTR_ERR(desc->tfm));
		return PTR_ERR(desc->tfm);
	}
	desc->flags = 0;

	rc = crypto_hash_init(desc);
	if (rc)
		crypto_free_hash(desc->tfm);
	return rc;
}
/*
 * Compute the iSCSI header digest: the basic header segment plus, when
 * present, the additional header segment (AHS). The result goes to crc.
 */
static void digest_header(struct hash_desc *hash, struct iscsi_pdu *pdu,
			  u8 *crc)
{
	struct scatterlist sg[2];
	unsigned int nbytes = sizeof(struct iscsi_hdr);
	int nents = pdu->ahssize ? 2 : 1;

	sg_init_table(sg, nents);
	sg_set_buf(&sg[0], &pdu->bhs, nbytes);
	if (pdu->ahssize) {
		sg_set_buf(&sg[1], pdu->ahs, pdu->ahssize);
		nbytes += pdu->ahssize;
	}

	crypto_hash_init(hash);
	crypto_hash_update(hash, sg, nbytes);
	crypto_hash_final(hash, crc);
}
/* a counter-based KDF based on NIST SP800-108 */
static int eap_pwd_kdf(const u8 *key, size_t keylen, const u8 *label,
		       size_t labellen, u8 *result, size_t resultbitlen)
{
	struct crypto_hash *hash;
	u8 digest[SHA256_MAC_LEN];
	u16 i, ctr, L;
	size_t resultbytelen, len = 0, mdlen;

	/* Round the requested bit length up to whole bytes. */
	resultbytelen = (resultbitlen + 7) / 8;
	ctr = 0;
	/* L = output length in bits, big-endian, mixed into every round.
	 * NOTE(review): htons() truncates resultbitlen to 16 bits —
	 * presumably callers never request more; verify at call sites. */
	L = htons(resultbitlen);
	while (len < resultbytelen) {
		ctr++;
		i = htons(ctr);
		/* K(ctr) = HMAC-SHA256(key, K(ctr-1) | ctr | label | L) */
		hash = crypto_hash_init(CRYPTO_HASH_ALG_HMAC_SHA256,
					key, keylen);
		if (hash == NULL) {
			return -1;
		}
		/* Chain in the previous round's digest (not on round 1). */
		if (ctr > 1) {
			crypto_hash_update(hash, digest, SHA256_MAC_LEN);
		}
		crypto_hash_update(hash, (u8 *)&i, sizeof(u16));
		crypto_hash_update(hash, label, labellen);
		crypto_hash_update(hash, (u8 *)&L, sizeof(u16));
		mdlen = SHA256_MAC_LEN;
		if (crypto_hash_finish(hash, digest, &mdlen) < 0) {
			return -1;
		}
		/* Copy only as much as still fits in the result buffer. */
		if ((len + mdlen) > resultbytelen) {
			os_memcpy(result + len, digest, resultbytelen - len);
		} else {
			os_memcpy(result + len, digest, mdlen);
		}
		len += mdlen;
	}

	/* since we're expanding to a bit length, mask off the excess */
	if (resultbitlen % 8) {
		u8 mask = 0xff;
		mask <<= (8 - (resultbitlen % 8));
		result[resultbytelen - 1] &= mask;
	}

	return 0;
}
/* get_key_hash * * @enc_key: key of which the md5 hash has to be generated * * Returns the md5 hash of the key. Responsibility of freeing the hashed key lies with the caller who requested the hashed key. */ unsigned char *get_key_hash(unsigned char *enc_key) { /* imp, plaintext should be array else getting sefault so copy key in array here */ struct scatterlist sg; struct hash_desc desc; int i, err; unsigned char *hashed_key; unsigned char plaintext[AES_KEY_SIZE]; for (i = 0; i < AES_KEY_SIZE; i++) plaintext[i] = enc_key[i]; hashed_key = kmalloc(sizeof(char)*AES_KEY_SIZE, GFP_KERNEL); desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) { err = PTR_ERR(desc.tfm); printk(KERN_ALERT"error in allocating hash"); goto ERR; } desc.flags = 0; sg_init_one(&sg, plaintext, AES_KEY_SIZE); err = crypto_hash_init(&desc); if (err) { printk(KERN_ALERT"error in initializing crypto hash\n"); goto ERR; } err = crypto_hash_update(&desc, &sg, AES_KEY_SIZE); if (err) { printk(KERN_ALERT"error in updating crypto hash\n"); goto ERR; } printk(KERN_ALERT"cry[to hash updated\n"); err = crypto_hash_final(&desc, hashed_key); if (err) { printk(KERN_ALERT"error in finalizing crypto hash\n"); goto ERR; } crypto_free_hash(desc.tfm); return hashed_key; ERR: if (desc.tfm) crypto_free_hash(desc.tfm); return ERR_PTR(err); }
/*
 * Compute HMAC-SHA1(key, input) into output. On transform allocation
 * failure the output buffer is left untouched and an error is logged
 * (the interface is void, so no error can be returned).
 */
static void HMAC_sha1(const __u8 *key, int keyLength, void *input,
		      int inputLength, __u8 *output)
{
	struct scatterlist sg[1];
	struct hash_desc desc;
	struct crypto_hash *hash_tfm;

	hash_tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	/* BUG FIX: the allocation was never checked; an ERR_PTR would have
	 * been dereferenced inside the crypto calls. */
	if (IS_ERR(hash_tfm)) {
		printk(KERN_ERR "%s: failed to allocate hmac(sha1): %ld\n",
		       __func__, PTR_ERR(hash_tfm));
		return;
	}
	desc.tfm = hash_tfm;
	desc.flags = 0;

	/* BUG FIX: the key must be installed before digesting. The
	 * original called crypto_hash_init() and only then setkey; that
	 * init was also redundant, since crypto_hash_digest() performs
	 * init/update/final itself. */
	crypto_hash_setkey(desc.tfm, key, keyLength);
	sg_set_buf(&sg[0], input, inputLength);
	crypto_hash_digest(&desc, &sg[0], inputLength, output);
	crypto_free_hash(hash_tfm);
}
/*
 * Compute the MD5 checksum of len bytes of key into chksum.
 * Returns 0 on success or a negative errno on failure.
 */
int calculate_key(char *key, char *chksum, int len)
{
	int rc = 0;
	struct scatterlist sg;
	struct hash_desc desc;

	desc.flags = 0;
	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm)) {
		rc = PTR_ERR(desc.tfm);
		goto out;
	}

	/* BUG FIX: on crypto_hash_init() failure the original jumped to
	 * out with rc still 0 (reporting success), and the transform was
	 * never freed on any path. */
	rc = crypto_hash_init(&desc);
	if (rc)
		goto out_free;

	sg_init_one(&sg, (u8 *)key, len);
	rc = crypto_hash_update(&desc, &sg, len);
	if (rc)
		goto out_free;
	rc = crypto_hash_final(&desc, chksum);

out_free:
	crypto_free_hash(desc.tfm);
out:
	return rc;
}
/*
 * Compute the iSCSI data digest over the PDU payload held in the tio
 * page vector, starting at the given offset, writing the result to crc.
 */
static void digest_data(struct hash_desc *hash, struct iscsi_cmnd *cmnd,
			struct tio *tio, u32 offset, u8 *crc)
{
	struct scatterlist *sg = cmnd->conn->hash_sg;
	u32 size, length;
	int i, idx, count;
	unsigned int nbytes;

	size = cmnd->pdu.datasize;
	/* The digest covers the payload padded up to a 4-byte multiple. */
	nbytes = size = (size + 3) & ~3;

	/* Translate the PDU offset into a starting page index plus an
	 * in-page offset within the tio page vector. */
	offset += tio->offset;
	idx = offset >> PAGE_CACHE_SHIFT;
	offset &= ~PAGE_CACHE_MASK;
	count = get_pgcnt(size, offset);
	assert(idx + count <= tio->pg_cnt);
	assert(count <= ISCSI_CONN_IOV_MAX);

	sg_init_table(sg, ARRAY_SIZE(cmnd->conn->hash_sg));
	crypto_hash_init(hash);

	/* One scatterlist entry per page fragment; only the first fragment
	 * can start mid-page (offset is reset to 0 after it). */
	for (i = 0; size; i++) {
		if (offset + size > PAGE_CACHE_SIZE)
			length = PAGE_CACHE_SIZE - offset;
		else
			length = size;

		sg_set_page(&sg[i], tio->pvec[idx + i], length, offset);
		size -= length;
		offset = 0;
	}

	sg_mark_end(&sg[i - 1]);

	crypto_hash_update(hash, sg, nbytes);
	crypto_hash_final(hash, crc);
}
/*
 * Hash len bytes of data into digest (mars_digest_size bytes) using the
 * globally shared transform. Serialized with tfm_sem because the hash
 * state lives inside the shared mars_tfm.
 */
void mars_digest(unsigned char *digest, void *data, int len)
{
	struct hash_desc desc = {
		.tfm = mars_tfm,
		.flags = 0,
	};
	struct scatterlist sg;

	memset(digest, 0, mars_digest_size);

	// TODO: use per-thread instance, omit locking
	down(&tfm_sem);
	crypto_hash_init(&desc);
	sg_init_table(&sg, 1);
	sg_set_buf(&sg, data, len);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
	up(&tfm_sem);
}
EXPORT_SYMBOL_GPL(mars_digest);

/*
 * Store a (possibly truncated) digest of the mref payload into
 * mref->ref_checksum. No-op unless checksumming is enabled
 * (ref_cs_mode > 0) and payload data is attached.
 */
void mref_checksum(struct mref_object *mref)
{
	unsigned char checksum[mars_digest_size];
	int len;

	if (mref->ref_cs_mode <= 0 || !mref->ref_data)
		return;

	mars_digest(checksum, mref->ref_data, mref->ref_len);

	/* Copy at most mars_digest_size bytes into the checksum field. */
	len = sizeof(mref->ref_checksum);
	if (len > mars_digest_size)
		len = mars_digest_size;
	memcpy(&mref->ref_checksum, checksum, len);
}