/*
 * Initialise ESSIV generation: hash the cipher key into the salt, then
 * load that salt as the key of the ESSIV cipher.  No local allocations.
 */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct crypto_cipher *essiv_tfm;
	struct scatterlist sg;
	struct hash_desc desc;
	int r;

	/* salt = H(key) */
	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (r)
		return r;

	essiv_tfm = cc->iv_private;

	/* Key the ESSIV cipher with the freshly computed salt. */
	return crypto_cipher_setkey(essiv_tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}
__be32 nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) { struct xdr_netobj cksum; struct hash_desc desc; struct scatterlist sg; __be32 status = nfserr_resource; dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", clname->len, clname->data); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) goto out_no_tfm; cksum.len = crypto_hash_digestsize(desc.tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) goto out; sg_init_one(&sg, clname->data, clname->len); if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data)) goto out; md5_to_hex(dname, cksum.data); kfree(cksum.data); status = nfs_ok; out: crypto_free_hash(desc.tfm); out_no_tfm: return status; }
/************************************************************************** * KERNEL SHA1 FUNCTION **************************************************************************/ unsigned int sbchk_sha1(char * code, unsigned int code_len, char* result) { unsigned int ret = SEC_OK; struct scatterlist sg[1]; struct crypto_hash *tfm = NULL; struct hash_desc desc; tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if(IS_ERR(tfm)) { ret = SBCHK_BASE_HASH_INIT_FAIL; goto _exit; } /* sg_init_one(&sg[0], plaintext, length); */ sg_set_buf(&sg[0], code, code_len); desc.tfm = tfm; desc.flags = 0; memset(result, 0, 20); /* SHA1 returns 20 bytes */ if (crypto_hash_digest(&desc, sg, code_len, result)) { ret = SBCHK_BASE_HASH_DATA_FAIL; goto _exit; } crypto_free_hash(tfm); _exit: return ret; }
/** * Calculate hash digest for the passed buffer. * * This should be used when computing the hash on a single contiguous buffer. * It combines the hash initialization, computation, and cleanup. * * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*) * \param[in] buf data buffer on which to compute hash * \param[in] buf_len length of \a buf in bytes * \param[in] key initial value/state for algorithm, if \a key = NULL * use default initial value * \param[in] key_len length of \a key in bytes * \param[out] hash pointer to computed hash value, if \a hash = NULL then * \a hash_len is to digest size in bytes, retval -ENOSPC * \param[in,out] hash_len size of \a hash buffer * * \retval -EINVAL \a buf, \a buf_len, \a hash_len, \a alg_id invalid * \retval -ENOENT \a hash_alg is unsupported * \retval -ENOSPC \a hash is NULL, or \a hash_len less than digest size * \retval 0 for success * \retval negative errno for other errors from lower layers. */ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg, const void *buf, unsigned int buf_len, unsigned char *key, unsigned int key_len, unsigned char *hash, unsigned int *hash_len) { struct scatterlist sl; struct hash_desc hdesc; int err; const struct cfs_crypto_hash_type *type; if (buf == NULL || buf_len == 0 || hash_len == NULL) return -EINVAL; err = cfs_crypto_hash_alloc(hash_alg, &type, &hdesc, key, key_len); if (err != 0) return err; if (hash == NULL || *hash_len < type->cht_size) { *hash_len = type->cht_size; crypto_free_hash(hdesc.tfm); return -ENOSPC; } sg_init_one(&sl, (void *)buf, buf_len); hdesc.flags = 0; err = crypto_hash_digest(&hdesc, &sl, sl.length, hash); crypto_free_hash(hdesc.tfm); return err; }
/*
 * Compute the MD5 digest of src[0..len) into dest (16 bytes).
 * Returns 0 on success, -EAGAIN on allocation or digest failure.
 */
static int generate_md5(char *src, char *dest, int len)
{
	struct scatterlist sg[1];
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int ret = 0;

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ret = -EAGAIN;
		goto out;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(sg, 1);
	sg_set_buf(sg, src, len);

	/* BUG FIX: nbytes was hard-coded to 1, so only the first byte of
	 * src was ever hashed; digest the whole buffer. */
	if (crypto_hash_digest(&desc, sg, len, dest))
		ret = -EAGAIN;

	crypto_free_hash(tfm);
out:
	return ret;
}
/*
 * Verify a module image against a whitelist of SHA-1 hashes.
 *
 * @mod: start of the module image in memory
 * @len: length of the image in bytes
 *
 * Hashes the image page-by-page, then searches the sorted 'modhashes'
 * table for a match.  Returns 0 when the hash is whitelisted, -EPERM
 * otherwise, or a PTR_ERR code if the hash tfm could not be allocated.
 */
long check_module_hash(char *mod, unsigned int len)
{
	/* caller holds module_mutex, so no concurrency */
	static struct hash_desc desc;
	static struct scatterlist sg[ MAX_MODULE_SIZE >> PAGE_SHIFT ];
	/* NOTE(review): (len + PAGE_SIZE) >> PAGE_SHIFT yields one page too
	 * many when len is an exact multiple of PAGE_SIZE; the usual
	 * round-up is (len + PAGE_SIZE - 1) >> PAGE_SHIFT — confirm intent. */
	int i, m, n, pages = (len + PAGE_SIZE) >> PAGE_SHIFT;
	unsigned char sha1_result[20];

	/* allocate on first invocation; kept across calls in the statics */
	if (!desc.tfm)
		desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);

	/* Map every page of the image into the scatterlist.  The final
	 * entry covers a full page, but only 'len' bytes are digested. */
	sg_init_table( sg, pages );
	for (i = 0; i < pages; i++)
		sg_set_buf( &sg[i], &mod[PAGE_SIZE * i], PAGE_SIZE );

	crypto_hash_digest(&desc, sg, len, sha1_result);

	/* use the first nibble to decide where to start searching, and take
	   single steps from there; hashcmp()'s sign tells us which way to
	   walk through the sorted table */
	for (n = 0, i = ((sha1_result[0] >> 4) * modcount) >> 4;
	     i >= 0 && i < modcount; i += (n = m)) {
		if (!(m = hashcmp(sha1_result, modhashes[i])))
			return 0;	/* exact match: module is whitelisted */
		/* bail if we are about to change direction */
		if (n && (m < 0)^(n < 0))
			break;
	}
	return -EPERM;
}
/*
 * Compute the Michael MIC over an 802.3 frame: MIC(key, DA | SA |
 * priority | 0 | 0 | 0 | data).  The result (MIC_DIGEST_LEN bytes) is
 * written to @mic.  Returns 0 on success, -1 on a NULL tfm or setkey
 * failure, otherwise the crypto_hash_digest() return value.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa,
		u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct hash_desc desc;
	struct scatterlist sg[2];
	u8 hdr[ETH_HLEN + 2];	/* pseudo-header plus zero padding */

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Build the pseudo-header; the three trailing bytes must be zero. */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	/* Scatter/gather lets us MIC header and payload in one pass. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, sizeof(hdr));
	sg_set_buf(&sg[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	desc.tfm = tfm_michael;
	desc.flags = 0;
	return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), mic);
}
/*
 * Michael MIC of an 802.3 frame: digest of DA | SA | priority | three
 * zero bytes | payload, keyed with @key.  Returns 0 on success, -1 when
 * the tfm is missing or setkey fails, else crypto_hash_digest()'s result.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa,
		u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist sg[2];
	struct hash_desc desc;
	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */

	if (!tfm_michael) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Assemble the pseudo-header; the padding must be zeroed. */
	memcpy(hdr, da, ETH_ALEN);
	memcpy(hdr + ETH_ALEN, sa, ETH_ALEN);
	hdr[2 * ETH_ALEN] = priority;
	hdr[2 * ETH_ALEN + 1] = 0;
	hdr[2 * ETH_ALEN + 2] = 0;
	hdr[2 * ETH_ALEN + 3] = 0;

	/* One scatter/gather pass over header and payload together. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, sizeof(hdr));
	sg_set_buf(&sg[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	desc.tfm = tfm_michael;
	desc.flags = 0;

	return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), mic);
}
/*
 * Digest a single contiguous PDU header buffer into @digest using the
 * caller-prepared hash descriptor.
 */
inline void
iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
		      unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist hdr_sg;

	sg_init_one(&hdr_sg, hdr, hdrlen);
	crypto_hash_digest(hash, &hdr_sg, hdrlen, digest);
}
/*
 * Re-verify the checksums of all pageset2 pages after resume I/O.
 *
 * Walks pageset2_map, recomputes each page's digest into this CPU's
 * bounce buffer context and compares it with the checksum stored in the
 * checksum page list.  Pages that changed are flagged for resaving; if
 * TOI_ABORT_ON_RESAVE_NEEDED is set, the whole operation is aborted.
 */
void check_checksums(void)
{
	int pfn, index = 0, cpu = smp_processor_id();
	char current_checksum[CHECKSUM_SIZE];
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	if (!toi_checksum_ops.enabled) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
		return;
	}

	/* Reset the walk over the linked list of checksum pages. */
	next_page = (unsigned long) page_list;
	toi_num_resaved = 0;
	this_checksum = 0;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
	memory_bm_position_reset(pageset2_map);
	for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(pageset2_map)) {
		int ret;
		char *pa;
		struct page *page = pfn_to_page(pfn);

		/* CHECKSUMS_PER_PAGE checksums per list page; the first
		 * word of each list page links to the next one. */
		if (index % CHECKSUMS_PER_PAGE) {
			this_checksum += CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}

		/* Done when IRQs disabled so must be atomic */
		pa = kmap_atomic(page);
		memcpy(ctx->buf, pa, PAGE_SIZE);
		kunmap_atomic(pa);
		/* NOTE(review): ctx->sg is assumed to already point at
		 * ctx->buf — set up elsewhere; confirm before changing. */
		ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
							current_checksum);

		if (ret) {
			printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
			return;
		}

		/* Stored checksum differs: page changed since it was saved. */
		if (memcmp(current_checksum, (char *) this_checksum,
							CHECKSUM_SIZE)) {
			toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %ld.",
					pfn);
			SetPageResave(pfn_to_page(pfn));
			toi_num_resaved++;
			if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
				set_abort_result(TOI_RESAVE_NEEDED);
		}

		index++;
	}
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
void do_integrity_check(void) { u8 *rbuf = (u8 *) ZIMAGE_ADDR; u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8 *key = "12345678"; printk(KERN_INFO "FIPS: do kernel integrity check\n"); if (unlikely(!need_integrity_check || in_fips_err())) return; if (*((u32 *) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "FIPS: invalid zImage magic number."); set_in_fips_err(); goto err1; } if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) { printk(KERN_ERR "FIPS: invalid zImage calculated len"); set_in_fips_err(); goto err1; } len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40]; desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n", PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm, key, strlen(key)); crypto_hash_digest(&desc, &sg, len, hmac); if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) { printk(KERN_INFO "FIPS: integrity check passed\n"); } else { printk(KERN_ERR "FIPS: integrity check failed\n"); set_in_fips_err(); } err: crypto_free_hash(desc.tfm); err1: need_integrity_check = false; return; }
/*
 * Verify a grsecurity admin password: SHA-256(salt | password) must match
 * the stored sum.  Returns 0 on a match, 1 on any failure.  The supplied
 * plaintext in entry->pw is wiped before returning.
 */
int
chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg[2];
	/* Aligned so the word-at-a-time compare below is legal. */
	unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
	unsigned long *tmpsumptr = (unsigned long *)temp_sum;
	unsigned long *sumptr = (unsigned long *)sum;
	int cryptres;
	int retval = 1;
	/* volatile + the dummy write keep the compiler from short-circuiting
	 * the comparison loop (constant-time-style compare). */
	volatile int mismatched = 0;
	volatile int dummy = 0;
	unsigned int i;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		memset(entry->pw, 0, GR_PW_LEN);
		return 1;
	}

	/* Digest salt and password together in one scatter/gather pass. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], salt, GR_SALT_LEN);
	sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));

	desc.tfm = tfm;
	desc.flags = 0;

	cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
					temp_sum);

	/* Wipe the plaintext password regardless of the outcome. */
	memset(entry->pw, 0, GR_PW_LEN);

	if (cryptres)
		goto out;

	/* Compare every word without early exit. */
	for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
		if (sumptr[i] != tmpsumptr[i])
			mismatched = 1;
		else
			dummy = 1;	// waste a cycle

	/* dummy is guaranteed 1 here when all words matched, so this is 0. */
	if (!mismatched)
		retval = dummy - 1;

out:
	crypto_free_hash(tfm);

	return retval;
}
void do_integrity_check(void) { u8* rbuf=__va(ZIMAGE_START); u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8* key="12345678"; printk(KERN_INFO "do kernel integrity check\n"); if (integrity_checked || in_fips_err()) return; if ( *((u32*) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "integ: invalid zImage magic number."); set_in_fips_err(); goto err; } len = *(u32*)&rbuf[44] - *(u32*)&rbuf[40]; if (len < 0) { printk(KERN_ERR "integ: invalid zImage calculated len"); set_in_fips_err(); goto err; } desc.tfm = crypto_alloc_hash("hmac(sha256)",0,0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "integ: failed to allocate tfm %ld\n",PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm,key,strlen(key)); crypto_hash_digest(&desc,&sg,len,hmac); if (!strncmp(hmac,&rbuf[len],SHA256_DIGEST_SIZE)) { printk(KERN_INFO "integrity check passed"); } else { printk(KERN_ERR "integrity check failed"); set_in_fips_err(); } err: integrity_checked=true; crypto_free_hash(desc.tfm); return; }
/*
 * Compute an HMAC with algorithm @algo over @data using @key, writing
 * the digest to @result.
 *
 * Returns WIFI_ENGINE_SUCCESS, WIFI_ENGINE_FAILURE_INVALID_LENGTH when
 * @result_len is smaller than the digest size, or WIFI_ENGINE_FAILURE
 * on allocation/setkey/digest errors.
 */
static int DriverEnvironment_HMAC(const char *algo, const void *key,
				  size_t key_len, const void *data,
				  size_t data_len, void *result,
				  size_t result_len)
{
	struct crypto_hash *tfm;
	struct scatterlist sg[1];
	struct hash_desc desc;
	int status = WIFI_ENGINE_FAILURE;
	int ret;

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		DE_TRACE_INT(TR_CRYPTO, "failed to allocate hash (%ld)\n",
			     PTR_ERR(tfm));
		return WIFI_ENGINE_FAILURE;
	}

	/* Single cleanup path below replaces the four duplicated
	 * crypto_free_hash() calls of the original. */
	if (crypto_hash_digestsize(tfm) > result_len) {
		status = WIFI_ENGINE_FAILURE_INVALID_LENGTH;
		goto out;
	}

	sg_init_one(&sg[0], data, data_len);
	crypto_hash_clear_flags(tfm, ~0);

	ret = crypto_hash_setkey(tfm, key, key_len);
	if (ret != 0) {
		DE_TRACE_INT(TR_CRYPTO, "failed to set key (%d)\n", ret);
		goto out;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_digest(&desc, sg, data_len, result);
	if (ret != 0) {
		/* BUG FIX: corrected "faild" typo in the trace message. */
		DE_TRACE_INT(TR_CRYPTO, "failed to digest (%d)\n", ret);
		goto out;
	}

	status = WIFI_ENGINE_SUCCESS;
out:
	crypto_free_hash(tfm);
	return status;
}
static char *calc_hmac(char *plain_text, unsigned int plain_text_size, char *key, unsigned int key_size) { struct scatterlist sg; char *result; struct crypto_hash *tfm; struct hash_desc desc; int ret; tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "failed to load transform for hmac(sha1): %ld\n", PTR_ERR(tfm)); return NULL; } desc.tfm = tfm; desc.flags = 0; result = kzalloc(TOSLSM_DIGEST_SIZE, GFP_KERNEL); if (!result) { printk(KERN_ERR "out of memory!\n"); goto out; } sg_set_buf(&sg, plain_text, plain_text_size); ret = crypto_hash_setkey(tfm, key, key_size); if (ret) { printk(KERN_ERR "setkey() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } ret = crypto_hash_digest(&desc, &sg, plain_text_size, result); if (ret) { printk(KERN_ERR "digest() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } out: crypto_free_hash(tfm); return result; }
/*
 * Compute the checksum of @page into @checksum_locn using this CPU's
 * preallocated digest context.
 *
 * Returns 0 on success (or when checksumming is disabled), otherwise the
 * error from crypto_hash_digest().
 */
int tuxonice_calc_checksum(struct page *page, char *checksum_locn)
{
	char *pa;
	int result, cpu = smp_processor_id();
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	/* Checksumming can be switched off entirely. */
	if (!toi_checksum_ops.enabled)
		return 0;

	/* Copy the page into the per-CPU bounce buffer so the digest works
	 * on a stable snapshot.  NOTE(review): ctx->sg is assumed to be
	 * pre-initialized elsewhere to point at ctx->buf — confirm. */
	pa = kmap(page);
	memcpy(ctx->buf, pa, PAGE_SIZE);
	kunmap(page);
	result = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
						checksum_locn);
	if (result)
		printk(KERN_ERR "TuxOnIce checksumming: crypto_hash_digest "
				"returned %d.\n", result);
	return result;
}
/*
 * Compute HMAC-SHA1 of @input keyed with @key, writing the 20-byte
 * digest to @output.  On allocation failure the function returns without
 * touching @output (the return type is void, so no error can be
 * propagated to the caller).
 */
static void HMAC_sha1(const __u8 *key, int keyLength, void *input,
		      int inputLength, __u8 *output)
{
	struct scatterlist sg[1];
	struct hash_desc desc;
	struct crypto_hash *hash_tfm;

	hash_tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	/* BUG FIX: the original dereferenced an ERR_PTR when allocation
	 * failed; bail out instead. */
	if (IS_ERR(hash_tfm))
		return;

	desc.tfm = hash_tfm;
	desc.flags = 0;

	/* BUG FIX: sg_set_buf() alone leaves the scatterlist end marker
	 * unset; sg_init_one() does the complete initialization. */
	sg_init_one(&sg[0], input, inputLength);

	/* The key must be installed before digesting; the original's extra
	 * crypto_hash_init() was redundant since crypto_hash_digest()
	 * performs its own init. */
	crypto_hash_setkey(desc.tfm, key, keyLength);
	crypto_hash_digest(&desc, &sg[0], inputLength, output);

	crypto_free_hash(hash_tfm);
}
/*
 * Digest a BFD packet with the named algorithm ("md5" or "sha1") into
 * @digest.  @session is currently unused but kept for interface
 * compatibility.  Returns ERROR_SUCCESS or ERROR_FAIL.
 */
s32 dp_bfd_set_hash(s8 *type, dp_bfd_packet_s *packet,
		    dp_bfd_session_s *session, u8 *digest)
{
	s32 ret = ERROR_FAIL;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;

	/* Only md5 and sha1 are supported.  (The original also computed a
	 * key_length per algorithm but never used it — removed.) */
	if (strcmp(type, "md5") != 0 && strcmp(type, "sha1") != 0)
		return ERROR_FAIL;

	tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERROR_FAIL;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, (u8 *)packet, packet->length);

	if (crypto_hash_digest(&desc, &sg, packet->length, digest))
		goto out;

	ret = ERROR_SUCCESS;
out:
	crypto_free_hash(desc.tfm);
	return ret;
}
/*
 * Key Derivation, from RFC 3078, RFC 3079.
 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
 */
static void get_new_key_from_sha(struct ppp_mppe_state *state)
{
	struct hash_desc desc;
	struct scatterlist sg[4];
	unsigned int total;

	sg_init_table(sg, 4);

	/* SHA1(master_key | pad1 | session_key | pad2) */
	total  = setup_sg(&sg[0], state->master_key, state->keylen);
	total += setup_sg(&sg[1], sha_pad->sha_pad1,
			  sizeof(sha_pad->sha_pad1));
	total += setup_sg(&sg[2], state->session_key, state->keylen);
	total += setup_sg(&sg[3], sha_pad->sha_pad2,
			  sizeof(sha_pad->sha_pad2));

	desc.tfm = state->sha1;
	desc.flags = 0;

	crypto_hash_digest(&desc, sg, total, state->sha1_digest);
}
int khash(char *buf, int key_len, char *hash) //khash - Kernel level hash function { struct scatterlist sg; struct crypto_hash *hash_tfm; struct hash_desc desc; int ret; hash_tfm = crypto_alloc_hash("md5", 0, 0); if(IS_ERR(hash_tfm)) {return PTR_ERR(hash_tfm); } desc.tfm = hash_tfm; sg_set_buf(&sg, buf, key_len); memset(hash, 0, key_len); ret = crypto_hash_digest(&desc, &sg, key_len, hash); crypto_free_hash(hash_tfm); return ret; }
/*
 * qfprom_secdat_read() - load the fuse-provisioning "secdat" blob from
 * SEC_PATH into the global 'secdat' structures and verify its SHA-256
 * against the hash compiled in via CONFIG_LGE_QFPROM_SECHASH.
 *
 * Returns RET_OK on success (or if already loaded), RET_ERR on any open/
 * read/parse/hash failure; on failure the entry list is freed again.
 */
static u32 qfprom_secdat_read(void)
{
	struct file *fp;
	int cnt=0;
	u32 ret = RET_OK;
	mm_segment_t old_fs=get_fs();	/* saved so vfs_read accepts kernel buffers */
#ifdef CONFIG_LGE_QFPROM_SECHASH
	struct crypto_hash *tfm = NULL;
	struct hash_desc desc;
	/* one scatterlist slot per hashed on-disk structure */
	struct scatterlist sg[FUSEPROV_SEC_STRUCTURE_MAX_NUM];
	char result[32]={0};			/* computed SHA-256 digest */
	unsigned char temp_buf[4]={0};		/* 2 hex chars + NUL scratch */
	unsigned char config_hash[32]={0};	/* expected digest from Kconfig string */
	int i=0;
	int temp=0;
	int sg_idx=0;				/* next free scatterlist slot */
	int segment_size=0;
#else
	printk(KERN_ERR "[QFUSE]%s : CONFIG_LGE_QFPROM_SECHASH is not exist\n", __func__);
	return RET_ERR;
#endif
	printk(KERN_INFO "[QFUSE]%s start\n", __func__);
	mutex_lock(&secdat_lock);
	/* Already loaded by an earlier call: nothing to do. */
	if(secdat.pentry != NULL){
		printk(KERN_INFO "[QFUSE]%s : secdata file already loaded \n", __func__);
		mutex_unlock(&secdat_lock);
		return RET_OK;
	}
	set_fs(KERNEL_DS);
	fp=filp_open(SEC_PATH, O_RDONLY, S_IRUSR);
	if(IS_ERR(fp)){
		int temp_err=0;
		temp_err =PTR_ERR(fp);
		printk(KERN_ERR "[QFUSE]%s : secdata file open error : %d\n", __func__, temp_err);
		ret = RET_ERR;
		/* jump past err_mem so the ERR_PTR fp is not filp_close()d */
		goto err;
	}
	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)){
		printk(KERN_ERR "[QFUSE]%s :hash alloc error\n", __func__);
		ret = RET_ERR;
		/* NOTE(review): tfm is an ERR_PTR here, yet err_mem does
		 * "if(tfm) crypto_free_hash(tfm)" — confirm this cannot
		 * free an error pointer. */
		goto err_mem;
	}
	desc.tfm=tfm;
	desc.flags=0;
	/* NOTE(review): crypto_hash_init() is redundant before
	 * crypto_hash_digest(), which performs its own init. */
	if (crypto_hash_init(&desc) != 0){
		printk(KERN_ERR "[QFUSE]%s : hash init error\n", __func__);
		ret = RET_ERR;
		goto err_mem;
	}
	sg_init_table(sg, ARRAY_SIZE(sg));
	fp->f_pos = 0;
	/* --- secdat header --- */
	cnt = vfs_read(fp,(char*)&secdat.hdr, sizeof(secdat.hdr),&fp->f_pos);
	if(cnt != sizeof(secdat.hdr)){
		printk(KERN_ERR "[QFUSE]%s : hdr read error\n", __func__);
		ret = RET_ERR;
		goto err_mem;
	}
	sg_set_buf(&sg[sg_idx++], (const char*)&secdat.hdr, sizeof(fuseprov_secdat_hdr_type));
	/* --- optional segment headers (revision >= 2 only) --- */
	if(secdat.hdr.revision >= 2 && secdat.hdr.segment_number !=0)
	{
		for(i=0; i < secdat.hdr.segment_number ; i++)
		{
			cnt = vfs_read(fp, (char*)&secdat.segment, sizeof(secdat.segment),&fp->f_pos);
			if(cnt != sizeof(secdat.segment)){
				printk(KERN_ERR "[QFUSE]%s : segment read error\n", __func__);
				ret = RET_ERR;
				goto err_mem;
			}
			/* NOTE(review): every iteration points its sg entry
			 * at the SAME secdat.segment buffer, which ends up
			 * holding only the LAST segment read; for
			 * segment_number > 1 the digest input may not match
			 * the file contents — confirm intent. */
			sg_set_buf(&sg[sg_idx++], (const char*)&secdat.segment,
				sizeof(fuseprov_secdat_hdr_segment_type));
		}
		/* NOTE(review): segment_size is the size of ONE segment
		 * header (the last read count), not segment_number of
		 * them — verify against the digest length below. */
		segment_size = cnt;
	}
	/* --- fuse list header --- */
	cnt = vfs_read(fp, (char*)&secdat.list_hdr, sizeof(secdat.list_hdr),&fp->f_pos);
	if(cnt != sizeof(secdat.list_hdr)){
		printk(KERN_ERR "[QFUSE]%s : list_hdr read error\n", __func__);
		ret = RET_ERR;
		goto err_mem;
	}
	sg_set_buf(&sg[sg_idx++], (const char*)&secdat.list_hdr, sizeof(fuseprov_qfuse_list_hdr_type));
	/* --- fuse entry list (allocated; freed on any later failure) --- */
	if(secdat.list_hdr.size > 0 && secdat.list_hdr.fuse_count > 0 &&
		secdat.list_hdr.fuse_count <= FUSEPROV_INFO_MAX_SIZE){
		secdat.pentry = kmalloc(secdat.list_hdr.size, GFP_KERNEL);
		if(secdat.pentry != NULL){
			memset(secdat.pentry, 0, secdat.list_hdr.size);
			cnt = vfs_read(fp, (char *)secdat.pentry, secdat.list_hdr.size,&fp->f_pos);
			if(cnt != secdat.list_hdr.size){
				printk(KERN_ERR "[QFUSE]%s : fuseprov_pentry read error\n", __func__);
				kfree(secdat.pentry);
				ret = RET_ERR;
				goto err_mem;
			}
			sg_set_buf(&sg[sg_idx++], (const char*)secdat.pentry, secdat.list_hdr.size);
		}else{
			printk(KERN_ERR "[QFUSE]%s : kmalloc pentry error\n", __func__);
			ret = RET_ERR;
			goto err_mem;
		}
	}else{
		/* Header failed sanity checks: dump everything we read. */
		printk(KERN_ERR "[QFUSE]%s : invalid header", __func__);
		printk(KERN_ERR "[QFUSE]hdr.magic1 : 0x%08X\n", secdat.hdr.magic1);
		printk(KERN_ERR "[QFUSE]   .magic2 : 0x%08X\n", secdat.hdr.magic2);
		printk(KERN_ERR "[QFUSE]   .revision : 0x%08X\n", secdat.hdr.revision);
		printk(KERN_ERR "[QFUSE]   .size : 0x%08X\n", secdat.hdr.size);
		printk(KERN_ERR "[QFUSE]   .segment_num : 0x%08X\n", secdat.hdr.segment_number);
		if(secdat.hdr.revision >= 2 && secdat.hdr.segment_number !=0){
			printk(KERN_ERR "[QFUSE]segment.offset : 0x%08X\n", secdat.segment.offset);
			printk(KERN_ERR "[QFUSE]       .type : 0x%08X\n", secdat.segment.type);
			printk(KERN_ERR "[QFUSE]       .attribute : 0x%08X\n", secdat.segment.attribute);
		}
		printk(KERN_ERR "[QFUSE]list_hdr.revision : 0x%08X\n", secdat.list_hdr.revision);
		printk(KERN_ERR "[QFUSE]        .size : 0x%08X\n", secdat.list_hdr.size);
		printk(KERN_ERR "[QFUSE]        .fuse_cnt : 0x%08X, %d\n", secdat.list_hdr.fuse_count, secdat.list_hdr.fuse_count);
		ret = RET_ERR;
		goto err_mem;
	}
	/* --- footer (read, but note it is NOT covered by the digest
	 * length passed below — sg slot is set without sg_idx++) --- */
	cnt = vfs_read(fp,(char*)&secdat.footer, sizeof(secdat.footer),&fp->f_pos);
	if(cnt != sizeof(secdat.footer)){
		printk(KERN_ERR "[QFUSE]%s : fuseprov_footer read error\n", __func__);
		ret = RET_ERR;
		goto err_mem;
	}
	sg_set_buf(&sg[sg_idx], (const char*)&secdat.footer, sizeof(fuseprov_secdat_footer_type));
	/* Digest hdr + (one) segment header + hdr.size bytes of payload. */
	if(crypto_hash_digest(&desc, sg, sizeof(fuseprov_secdat_hdr_type)+segment_size+secdat.hdr.size, result) != 0){
		printk(KERN_ERR "[QFUSE]%s : hash_digest error\n", __func__);
		ret = RET_ERR;
		goto err_mem;
	}
	/* Parse the Kconfig hex string (64 chars) into 32 raw bytes. */
	for(i=0;i<64;i=i+2){
		memset(temp_buf, 0, 4);
		memcpy(temp_buf, CONFIG_LGE_QFPROM_SECHASH+i, 2);
		sscanf(temp_buf, "%x", &temp);
		config_hash[i/2] = temp;
	}
	/* NOTE(review): strncmp() on a raw binary digest stops at the first
	 * NUL byte, so this comparison can succeed on a partial match —
	 * memcmp() would be the strict comparison; confirm before changing. */
	if(strncmp(result, config_hash, sizeof(result))!=0){
		printk(KERN_ERR "[QFUSE]%s : sec hash different\n", __func__);
		printk(KERN_ERR "[QFUSE]partition hash : %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			result[0],result[1],result[2],result[3],result[4],result[5],result[6],result[7],
			result[8],result[9],result[10],result[11],result[12],result[13],result[14],result[15],
			result[16],result[17],result[18],result[19],result[20],result[21],result[22],result[23],
			result[24],result[25],result[26],result[27],result[28],result[29],result[30],result[31]);
		printk(KERN_ERR "[QFUSE]config hash : %s\n",CONFIG_LGE_QFPROM_SECHASH);
		ret = RET_ERR;
		goto err_mem;
	}
err_mem:
	if(tfm)
		crypto_free_hash(tfm);
	/* On failure, drop the partially loaded entry list. */
	if(ret == RET_ERR && secdat.pentry){
		kfree(secdat.pentry);
		secdat.pentry=NULL;
	}
	if(fp)
		filp_close(fp, NULL);
err:
	set_fs(old_fs);
	mutex_unlock(&secdat_lock);
	printk(KERN_INFO "[QFUSE]%s end\n", __func__);
	return ret;
}
static int tf_self_test_integrity(const char *alg_name, struct module *mod) { unsigned char expected[32]; unsigned char actual[32]; struct scatterlist *sg = NULL; struct hash_desc desc = {NULL, 0}; size_t digest_length; unsigned char *const key = tf_integrity_hmac_sha256_key; size_t const key_length = sizeof(tf_integrity_hmac_sha256_key); int error; if (mod->raw_binary_ptr == NULL) return -ENXIO; if (tf_integrity_hmac_sha256_expected_value == NULL) return -ENOENT; INFO("expected=%s", tf_integrity_hmac_sha256_expected_value); error = scan_hex(expected, sizeof(expected), tf_integrity_hmac_sha256_expected_value); if (error < 0) { pr_err("tf_driver: Badly formatted hmac_sha256 parameter " "(should be a hex string)\n"); return -EIO; }; desc.tfm = crypto_alloc_hash(alg_name, 0, 0); if (IS_ERR_OR_NULL(desc.tfm)) { ERROR("crypto_alloc_hash(%s) failed", alg_name); error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm); goto abort; } digest_length = crypto_hash_digestsize(desc.tfm); INFO("alg_name=%s driver_name=%s digest_length=%u", alg_name, crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)), digest_length); error = crypto_hash_setkey(desc.tfm, key, key_length); if (error) { ERROR("crypto_hash_setkey(%s) failed: %d", alg_name, error); goto abort; } sg = vmalloc_to_sg(mod->raw_binary_ptr, mod->raw_binary_size); if (IS_ERR_OR_NULL(sg)) { ERROR("vmalloc_to_sg(%lu) failed: %d", mod->raw_binary_size, (int)sg); error = (sg == NULL ? 
-ENOMEM : (int)sg); goto abort; } error = crypto_hash_digest(&desc, sg, mod->raw_binary_size, actual); if (error) { ERROR("crypto_hash_digest(%s) failed: %d", alg_name, error); goto abort; } kfree(sg); crypto_free_hash(desc.tfm); #ifdef CONFIG_TF_DRIVER_FAULT_INJECTION if (tf_fault_injection_mask & TF_CRYPTO_ALG_INTEGRITY) { pr_warning("TF: injecting fault in integrity check!\n"); actual[0] = 0xff; actual[1] ^= 0xff; } #endif TF_TRACE_ARRAY(expected, digest_length); TF_TRACE_ARRAY(actual, digest_length); if (memcmp(expected, actual, digest_length)) { ERROR("wrong %s digest value", alg_name); error = -EINVAL; } else { INFO("%s: digest successful", alg_name); error = 0; } return error; abort: if (!IS_ERR_OR_NULL(sg)) kfree(sg); if (!IS_ERR_OR_NULL(desc.tfm)) crypto_free_hash(desc.tfm); return error == -ENOMEM ? error : -EIO; }