/**
 * @brief Calculate the EP_UID from the IMS (correct version)
 *
 * This is the EP_UID calculation, correctly implemented - usable
 * on post-ES3 chips
 *
 * @param ims_value A pointer to the 35-byte IMS value
 * @param ep_uid A pointer to the octet EP_UID output value
 */
void calculate_epuid(uint8_t *ims_value, mcl_octet *ep_uid)
{
    /* same code used in ES3 boot ROM to generate the EUID */
    int i;
    static uint8_t ep_uid_calc[SHA256_HASH_DIGEST_SIZE];
    static uint8_t y1[SHA256_HASH_DIGEST_SIZE];
    static uint8_t z0[SHA256_HASH_DIGEST_SIZE];
    uint32_t temp;
    uint32_t *pims = (uint32_t *)ims_value;

    hash_start();
    /* grab IMS 4 bytes at a time and feed that to hash_update */
    for (i = 0; i < 4; i++) {
        temp = pims[i] ^ 0x3d3d3d3d;
        hash_update((uint8_t *)&temp, sizeof(temp));
    }
    hash_final(y1);

    hash_start();
    hash_update(y1, SHA256_HASH_DIGEST_SIZE);
    temp = 0x01010101;
    for (i = 0; i < 8; i++) {
        hash_update((uint8_t *)&temp, sizeof(temp));
    }
    hash_final(z0);

    hash_it(z0, SHA256_HASH_DIGEST_SIZE, ep_uid_calc);
    memcpy(ep_uid->val, ep_uid_calc, EP_UID_SIZE);
    ep_uid->len = EP_UID_SIZE;
}
void print(FILE* output, atom_p atom) {
    switch(atom->type) {
        case T_NIL:   fprintf(output, "nil");   break;
        case T_TRUE:  fprintf(output, "true");  break;
        case T_FALSE: fprintf(output, "false"); break;
        case T_SYM:   fprintf(output, "%s", atom->sym);     break;
        case T_NUM:   fprintf(output, "%lf", atom->num);    break;
        case T_STR:   fprintf(output, "\"%s\"", atom->str); break;
        case T_ARRAY:
            fprintf(output, "(");
            for(size_t i = 0; i < atom->array.len; i++) {
                print(output, atom->array.ptr[i]);
                if (i != atom->array.len - 1)
                    fprintf(output, " ");
            }
            fprintf(output, ")");
            break;
        case T_OBJ:
        case T_ENV:
            fprintf(output, "{");
            const char* sep = "";
            for(obj_slot_p it = hash_start(atom); it != NULL; it = hash_next(atom, it)) {
                fprintf(output, "%s %s: ", sep, it->key);
                print(output, it->value);
                sep = ",";
            }
            fprintf(output, " }");
            break;
        case T_ERROR:   fprintf(output, "ERROR: %s", atom->error);      break;
        case T_BUILTIN: fprintf(output, "<builtin %p>", atom->builtin); break;
        case T_SYNTAX:  fprintf(output, "<syntax %p>", atom->syntax);   break;
        case T_LAMBDA:
            fprintf(output, "<lambda ");
            print(output, atom->lambda.args);
            fprintf(output, " ");
            print(output, atom->lambda.body);
            fprintf(output, " >");
            break;
    }
}
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj,
                         struct hashtable *stated_files,
                         struct hashtable *hashed_files)
{
    for (uint32_t i = 0; i < obj->n_file_info_indexes; i++) {
        struct file_info *fi = &mf->file_infos[obj->file_info_indexes[i]];
        char *path = mf->files[fi->index];

        struct file_stats *st = hashtable_search(stated_files, path);
        if (!st) {
            struct stat file_stat;
            if (x_stat(path, &file_stat) != 0) {
                return 0;
            }
            st = x_malloc(sizeof(*st));
            st->size = file_stat.st_size;
            st->mtime = file_stat.st_mtime;
            st->ctime = file_stat.st_ctime;
            hashtable_insert(stated_files, x_strdup(path), st);
        }

        if (fi->size != st->size) {
            return 0;
        }

        if (conf->sloppiness & SLOPPY_FILE_STAT_MATCHES) {
            if (fi->mtime == st->mtime && fi->ctime == st->ctime) {
                cc_log("mtime/ctime hit for %s", path);
                continue;
            } else {
                cc_log("mtime/ctime miss for %s", path);
            }
        }

        struct file_hash *actual = hashtable_search(hashed_files, path);
        if (!actual) {
            struct mdfour hash;
            hash_start(&hash);
            int result = hash_source_code_file(conf, &hash, path);
            if (result & HASH_SOURCE_CODE_ERROR) {
                cc_log("Failed hashing %s", path);
                return 0;
            }
            if (result & HASH_SOURCE_CODE_FOUND_TIME) {
                return 0;
            }
            actual = x_malloc(sizeof(*actual));
            hash_result_as_bytes(&hash, actual->hash);
            actual->size = hash.totalN;
            hashtable_insert(hashed_files, x_strdup(path), actual);
        }
        if (memcmp(fi->hash, actual->hash, mf->hash_size) != 0
            || fi->size != actual->size) {
            return 0;
        }
    }
    return 1;
}
term_t scheduler_list_registered(heap_t *hp)
{
    hash_index_t hi;
    hash_start(named_processes, &hi);
    proc_t *proc;
    term_t regs = nil;
    while ((proc = hash_next(&hi)) != 0)
        regs = heap_cons(hp, proc->name, regs);
    return regs;
}
term_t scheduler_list_processes(heap_t *hp)
{
    hash_index_t hi;
    hash_start(registry, &hi);
    proc_t *proc;
    term_t ps = nil;
    while ((proc = hash_next(&hi)) != 0)
        ps = heap_cons(hp, proc->pid, ps);
    return ps;
}
static void calculate_y2(uint8_t *ims, uint8_t *y2)
{
    /* Y2 = sha256(IMS[0:31] xor copy(0x5a, 32)) */
    uint32_t i;
    uint32_t temp;
    uint32_t *pims = (uint32_t *)ims;

    hash_start();
    /* grab IMS 4 bytes at a time and feed that to hash_update */
    for (i = 0; i < 8; i++) {
        temp = pims[i] ^ 0x5a5a5a5a;
        hash_update((unsigned char *)&temp, sizeof(temp));
    }
    hash_final(y2);
}
void text_renderer_destroy(text_renderer_p renderer)
{
    texture_destroy(renderer->texture);

    for(size_t i = 0; i < renderer->lines->length; i++)
        array_destroy( array_elem(renderer->lines, text_renderer_line_t, i).cells );
    array_destroy(renderer->lines);

    for(hash_elem_t e = hash_start(renderer->fonts); e != NULL; e = hash_next(renderer->fonts, e))
        text_renderer_font_destroy(renderer, hash_key(e));
    hash_destroy(renderer->fonts);

    FT_Error error = FT_Done_FreeType(renderer->freetype);
    if (error)
        printf("FT_Done_FreeType error\n");
}
void hash_resize(atom_p obj, uint32_t new_capacity) {
    assert(obj->type == T_OBJ || obj->type == T_ENV);

    atom_t new_obj;
    new_obj.type = obj->type;
    new_obj.obj.parent = obj->obj.parent;
    new_obj.obj.len = 0;
    new_obj.obj.cap = new_capacity;
    new_obj.obj.deleted = 0;
    new_obj.obj.slots = gc_alloc_zeroed(new_capacity * sizeof(new_obj.obj.slots[0]));

    for(obj_slot_p it = hash_start(obj); it != NULL; it = hash_next(obj, it)) {
        hash_set(&new_obj, it->key, it->value);
    }

    *obj = new_obj;
}
/**********************************************************************
* %FUNCTION: peer_find
* %ARGUMENTS:
*  addr -- IP address of peer
*  peername -- AVP peer hostname
* %RETURNS:
*  A pointer to the peer with given IP address, or NULL if not found.
* %DESCRIPTION:
*  Searches peer hash table for specified peer.
***********************************************************************/
l2tp_peer *
l2tp_peer_find(struct sockaddr_in *addr, char const *peername)
{
    void *cursor;
    l2tp_peer *peer = NULL;
    l2tp_peer *candidate = NULL;
    char addr1_str[16], addr2_str[16];

    for (candidate = hash_start(&all_peers, &cursor);
         candidate;
         candidate = hash_next(&all_peers, &cursor)) {
        unsigned long mask = candidate->mask_bits ?
            htonl(0xFFFFFFFFUL << (32 - candidate->mask_bits)) : 0;

        strcpy(addr1_str, inet_ntoa(addr->sin_addr));
        strcpy(addr2_str, inet_ntoa(candidate->addr.sin_addr));

        DBG(l2tp_db(DBG_TUNNEL, "l2tp_peer_find(%s) examining peer %s/%d\n",
                    addr1_str, addr2_str, candidate->mask_bits));

        if ((candidate->addr.sin_addr.s_addr & mask) ==
                (addr->sin_addr.s_addr & mask) &&
            (!peername || !(candidate->peername[0]) ||
             !strcmp(peername, candidate->peername))) {
            if (peer == NULL) {
                peer = candidate;
            } else {
                if (peer->mask_bits < candidate->mask_bits)
                    peer = candidate;
            }
        }
    }

    strcpy(addr1_str, inet_ntoa(addr->sin_addr));
    if (peer != NULL)
        strcpy(addr2_str, inet_ntoa(peer->addr.sin_addr));

    DBG(l2tp_db(DBG_TUNNEL, "l2tp_peer_find(%s) found %s/%d\n",
                addr1_str,
                peer == NULL ? "NULL" : addr2_str,
                peer == NULL ? -1 : peer->mask_bits));
    return peer;
}
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj, struct hashtable *hashed_files)
{
    uint32_t i;
    struct file_info *fi;
    struct {int result; struct file_hash fh;} *actual;
    struct mdfour hash;
    int result;

    for (i = 0; i < obj->n_file_info_indexes; i++) {
        fi = &mf->file_infos[obj->file_info_indexes[i]];
        actual = hashtable_search(hashed_files, mf->files[fi->index]);
        if (!actual) {
            actual = x_malloc(sizeof(*actual));
            hash_start(&hash);
            result = hash_source_code_file(conf, &hash, mf->files[fi->index]);
            if (result & HASH_SOURCE_CODE_ERROR) {
                cc_log("Failed hashing %s", mf->files[fi->index]);
                cloud_hook_reset_includes();
                free(actual);
                return 0;
            }
            if (result & HASH_SOURCE_CODE_FOUND_TIME) {
                cloud_hook_reset_includes();
                free(actual);
                return 0;
            }
            actual->result = result;
            hash_result_as_bytes(&hash, actual->fh.hash);
            actual->fh.size = hash.totalN;
            hashtable_insert(hashed_files, x_strdup(mf->files[fi->index]), actual);
        }
        if (memcmp(fi->hash, actual->fh.hash, mf->hash_size) != 0
            || fi->size != actual->fh.size) {
            cloud_hook_reset_includes();
            return 0;
        }
        /* Passing the hash here is an optimization, but it's not the right
           hash if a time macro was present. */
        cloud_hook_include_file(mf->files[fi->index],
                                actual->result ? NULL : &actual->fh);
    }

    return 1;
}
core::stringc CHashMD5::quickHash(core::stringc str)
{
    hash_start();
    hash_append((u8*)str.c_str(), str.size());

    u8 digest[16];
    hash_finish(digest, 16);

    c8 retstr[33];
    memset(retstr, 0, 33);
    c8* temp = retstr;
    for(int i = 0; i < 16; i++)
    {
        if((digest[i] & 0xff) > 0xf){
            sprintf(temp, "%x", (digest[i] & 0xff));
        }else{
            sprintf(temp, "0%x", (digest[i] & 0xff));
        }
        temp += 2;
    }

    core::stringc ret(retstr);
    return ret;
}
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj, struct hashtable *hashed_files)
{
    uint32_t i;
    struct file_info *fi;
    struct file_hash *actual;
    struct mdfour hash;
    int result;

    for (i = 0; i < obj->n_file_info_indexes; i++) {
        fi = &mf->file_infos[obj->file_info_indexes[i]];
        actual = hashtable_search(hashed_files, mf->files[fi->index]);
        if (!actual) {
            actual = x_malloc(sizeof(*actual));
            hash_start(&hash);
            result = hash_source_code_file(conf, &hash, mf->files[fi->index]);
            if (result & HASH_SOURCE_CODE_ERROR) {
                cc_log("Failed hashing %s", mf->files[fi->index]);
                free(actual);
                return 0;
            }
            if (result & HASH_SOURCE_CODE_FOUND_TIME) {
                free(actual);
                return 0;
            }
            hash_result_as_bytes(&hash, actual->hash);
            actual->size = hash.totalN;
            hashtable_insert(hashed_files, x_strdup(mf->files[fi->index]), actual);
        }
        if (memcmp(fi->hash, actual->hash, mf->hash_size) != 0
            || fi->size != actual->size) {
            return 0;
        }
    }

    return 1;
}
/* find the hash for a command. The hash includes all argument lists,
   plus the output from running the compiler with -E */
static void find_hash(ARGS *args)
{
    int i;
    char *path_stdout, *path_stderr;
    char *hash_dir;
    char *s;
    struct stat st;
    int status;
    int nlevels = 2;
    char *input_base;
    char *tmp;

    if ((s = getenv("CCACHE_NLEVELS"))) {
        nlevels = atoi(s);
        if (nlevels < 1) nlevels = 1;
        if (nlevels > 8) nlevels = 8;
    }

    hash_start();

    /* when we are doing the unifying tricks we need to include the
       input file name in the hash to get the warnings right */
    if (enable_unify) {
        hash_string(input_file);
    }

    /* we have to hash the extension, as a .i file isn't treated the
       same by the compiler as a .ii file */
    hash_string(i_extension);

    /* first the arguments */
    for (i=1;i<args->argc;i++) {
        /* some arguments don't contribute to the hash. The theory is
           that these arguments will change the output of -E if they
           are going to have any effect at all, or they only affect
           linking */
        if (i < args->argc-1) {
            if (strcmp(args->argv[i], "-I") == 0 ||
                strcmp(args->argv[i], "-include") == 0 ||
                strcmp(args->argv[i], "-L") == 0 ||
                strcmp(args->argv[i], "-D") == 0 ||
                strcmp(args->argv[i], "-idirafter") == 0 ||
                strcmp(args->argv[i], "-isystem") == 0) {
                i++;
                continue;
            }
        }
        if (strncmp(args->argv[i], "-I", 2) == 0 ||
            strncmp(args->argv[i], "-L", 2) == 0 ||
            strncmp(args->argv[i], "-D", 2) == 0 ||
            strncmp(args->argv[i], "-idirafter", 10) == 0 ||
            strncmp(args->argv[i], "-isystem", 8) == 0) {
            continue;
        }

        if (strncmp(args->argv[i], "--specs=", 8) == 0 &&
            stat(args->argv[i]+8, &st) == 0) {
            /* if given an explicit specs file, then hash that file,
               but don't include the path to it in the hash */
            hash_file(args->argv[i]+8);
            continue;
        }

        /* all other arguments are included in the hash */
        hash_string(args->argv[i]);
    }

    /* the compiler driver size and date. This is a simple minded way
       to try and detect compiler upgrades. It is not 100% reliable */
    if (stat(args->argv[0], &st) != 0) {
        cc_log("Couldn't stat the compiler!? (argv[0]='%s')\n", args->argv[0]);
        stats_update(STATS_COMPILER);
        failed();
    }

    /* also include the hash of the compiler name - as some compilers
       use hard links and behave differently depending on the real name */
    if (st.st_nlink > 1) {
        hash_string(str_basename(args->argv[0]));
    }

    hash_int(st.st_size);
    hash_int(st.st_mtime);

    /* possibly hash the current working directory */
    if (getenv("CCACHE_HASHDIR")) {
        char *cwd = gnu_getcwd();
        if (cwd) {
            hash_string(cwd);
            free(cwd);
        }
    }

    /* ~/hello.c -> tmp.hello.123.i
       limit the basename to 10 characters in order to cope with
       filesystems with small maximum filename length limits */
    input_base = str_basename(input_file);
    tmp = strchr(input_base, '.');
    if (tmp != NULL) {
        *tmp = 0;
    }
    if (strlen(input_base) > 10) {
        input_base[10] = 0;
    }

    /* now the run */
    x_asprintf(&path_stdout, "%s/%s.tmp.%s.%s",
               temp_dir, input_base, tmp_string(), i_extension);
    x_asprintf(&path_stderr, "%s/tmp.cpp_stderr.%s", temp_dir, tmp_string());

    if (!direct_i_file) {
        /* run cpp on the input file to obtain the .i */
        args_add(args, "-E");
        args_add(args, input_file);
        status = execute(args->argv, path_stdout, path_stderr);
        args_pop(args, 2);
    } else {
        /* we are compiling a .i or .ii file - that means we can skip
           the cpp stage and directly form the correct i_tmpfile */
        path_stdout = input_file;
        if (create_empty_file(path_stderr) != 0) {
            stats_update(STATS_ERROR);
            cc_log("failed to create empty stderr file\n");
            failed();
        }
        status = 0;
    }

    if (status != 0) {
        if (!direct_i_file) {
            unlink(path_stdout);
        }
        unlink(path_stderr);
        cc_log("the preprocessor gave %d\n", status);
        stats_update(STATS_PREPROCESSOR);
        failed();
    }

    /* if the compilation is with -g then we have to include the whole
       of the preprocessor output, which means we are sensitive to
       line number information. Otherwise we can discard line number
       info, which makes us less sensitive to reformatting changes

       Note! I have now disabled the unification code by default
       as it gives the wrong line numbers for warnings. Pity. */
    if (!enable_unify) {
        hash_file(path_stdout);
    } else {
        if (unify_hash(path_stdout) != 0) {
            stats_update(STATS_ERROR);
            failed();
        }
    }
    hash_file(path_stderr);

    i_tmpfile = path_stdout;

    if (!getenv("CCACHE_CPP2")) {
        /* if we are using the CPP trick then we need to remember this
           stderr data and output it just before the main stderr from
           the compiler pass */
        cpp_stderr = path_stderr;
    } else {
        unlink(path_stderr);
        free(path_stderr);
    }

    /* we use an N-level subdir for the cache path to reduce the
       impact on filesystems which are slow for large directories */
    s = hash_result();
    x_asprintf(&hash_dir, "%s/%c", cache_dir, s[0]);
    x_asprintf(&stats_file, "%s/stats", hash_dir);
    for (i=1; i<nlevels; i++) {
        char *p;
        if (create_dir(hash_dir) != 0) {
            cc_log("failed to create %s\n", hash_dir);
            failed();
        }
        x_asprintf(&p, "%s/%c", hash_dir, s[i]);
        free(hash_dir);
        hash_dir = p;
    }
    if (create_dir(hash_dir) != 0) {
        cc_log("failed to create %s\n", hash_dir);
        failed();
    }
    x_asprintf(&hashname, "%s/%s", hash_dir, s+nlevels);
    free(hash_dir);
}
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj,
                         struct hashtable *stated_files,
                         struct hashtable *hashed_files)
{
    uint32_t i;
    struct file_info *fi;
    struct file_hash *actual;
    struct file_stats *st;
    struct mdfour hash;
    int result;
    char *path;

    for (i = 0; i < obj->n_file_info_indexes; i++) {
        fi = &mf->file_infos[obj->file_info_indexes[i]];
        path = mf->files[fi->index];

        /* look up stat info in the stated_files table (not hashed_files,
           which stores file_hash entries) */
        st = hashtable_search(stated_files, path);
        if (!st) {
            struct stat file_stat;
            if (stat(path, &file_stat) == -1) {
                cc_log("Failed to stat include file %s: %s", path, strerror(errno));
                return 0;
            }
            st = x_malloc(sizeof(*st));
            st->size = file_stat.st_size;
            st->mtime = file_stat.st_mtime;
            st->ctime = file_stat.st_ctime;
            hashtable_insert(stated_files, x_strdup(path), st);
        }

        if (conf->sloppiness & SLOPPY_FILE_STAT_MATCHES) {
            /*
             * st->ctime is sometimes 0, so we can't check that both st->ctime
             * and st->mtime are greater than time_of_compilation. But it's
             * sufficient to check that either is.
             */
            if (fi->size == st->size
                && fi->mtime == st->mtime
                && fi->ctime == st->ctime
                && MAX(st->mtime, st->ctime) >= time_of_compilation) {
                cc_log("size/mtime/ctime hit for %s", path);
                continue;
            } else {
                cc_log("size/mtime/ctime miss for %s", path);
            }
        }

        actual = hashtable_search(hashed_files, path);
        if (!actual) {
            actual = x_malloc(sizeof(*actual));
            hash_start(&hash);
            result = hash_source_code_file(conf, &hash, path);
            if (result & HASH_SOURCE_CODE_ERROR) {
                cc_log("Failed hashing %s", path);
                free(actual);
                return 0;
            }
            if (result & HASH_SOURCE_CODE_FOUND_TIME) {
                free(actual);
                return 0;
            }
            hash_result_as_bytes(&hash, actual->hash);
            actual->size = hash.totalN;
            hashtable_insert(hashed_files, x_strdup(path), actual);
        }
        if (memcmp(fi->hash, actual->hash, mf->hash_size) != 0
            || fi->size != actual->size) {
            return 0;
        }
    }

    return 1;
}