/*
 * _sym_reinsert()
 *	Insert an already-allocated string into a symbol table
 *
 * Used only during table growth (see sym_lookup()): "ptr" is already
 * owned by the table, so we merely place the pointer into the first
 * open slot at or after its hash position (linear probing with
 * wrap-around).  abort()s if no slot is found, which the caller
 * prevents by always growing into a strictly larger table.
 */
static void _sym_reinsert(struct symbol *sp, const char *ptr)
{
	int hash = hashval((unsigned const char *)ptr);
	int start, x;

	start = x = hash % sp->sym_size;
	do {
		/*
		 * When find an open slot, this is where we'll put
		 * this new string.
		 */
		if (sp->sym_strings[x] == NULL) {
			sp->sym_strings[x] = ptr;
			return;
		}

		/*
		 * Advance to next slot (wrap to 0 at the end of the array)
		 */
		if (++x >= sp->sym_size) {
			x = 0;
		}
	} while (x != start);

	/*
	 * Shouldn't happen: the destination table is always larger
	 * than the number of strings being re-inserted.
	 */
	abort();
}
static net_dev_stats *hash_lookup(char *devname, size_t nlen) { int hval; net_dev_stats *stats; char *name=strndup(devname, nlen); hval = hashval(name); for (stats = netstats[hval]; stats != NULL; stats = stats->next) { if (strcmp(name, stats->name) == 0) { free(name); return stats; } } stats = (net_dev_stats *)malloc(sizeof(net_dev_stats)); if ( stats == NULL ) { err_msg("unable to allocate memory for /proc/net/dev/stats in hash_lookup(%s,%d)", name, nlen); free(name); return NULL; } stats->name = strndup(devname,nlen); stats->rpi = 0; stats->rpo = 0; stats->rbi = 0; stats->rbo = 0; stats->next = netstats[hval]; netstats[hval] = stats; free(name); return stats; }
/*
 * hash_lookup()
 *	Look up "key" in the hash table and return a private copy of
 *	its value datum, or NULL if the key is not present.
 *
 * The caller owns the returned datum and must free it.  The bucket's
 * read lock is held only for the duration of the chain scan.
 *
 * Fix: removed the dead "bucket == NULL" test — bucket was the
 * address of an array element (&hash->node[i]) and can never be
 * NULL; an unused head node is detected by its NULL key inside the
 * loop instead.
 */
datum_t *
hash_lookup (datum_t *key, hash_t * hash)
{
   size_t i;
   datum_t *val;
   node_t *bucket;

   i = hashval(key, hash);

   ck_rwlock_read_lock(&hash->lock[i]);

   /*
    * Walk the chain rooted at the in-table head node.  A head whose
    * key is NULL is simply unused and is skipped by the test below.
    */
   for (bucket = &hash->node[i]; bucket != NULL; bucket = bucket->next)
      {
         if (bucket->key && hash_keycmp(hash, key, bucket->key))
            {
               val = datum_dup( bucket->val );
               ck_rwlock_read_unlock(&hash->lock[i]);
               return val;
            }
      }

   ck_rwlock_read_unlock(&hash->lock[i]);
   return NULL;
}
/*
 * getRTClass()
 *	Return the runtime class named "classname", creating and
 *	registering it on first reference.
 */
RTClass* getRTClass(char *classname)
{
	RTHash *entry = rttbl[hashval(classname)];

	while (entry != NULL) {
		if (strcmp(classname, entry->rtc->classname) == 0)
			return entry->rtc;
		entry = entry->next;
	}
	return addRTClass(classname);
}
/*
 * hash_delete()
 *	Remove "key" from the hash table and return its value datum
 *	(ownership passes to the caller), or NULL if the key is absent.
 *
 * Chain heads live inside the table itself (hash->node[i]), so a head
 * is "deleted" by copying the second node's payload into it (or by
 * zeroing it when there are no overflow nodes); only overflow nodes
 * are free()d.
 *
 * Fixes: (1) the previous version never compared keys, so it always
 * removed the chain head regardless of which key was requested —
 * inconsistent with hash_lookup()/hash_insert(), which match via
 * hash_keycmp(); (2) the overflow node absorbed into the head was
 * never free()d, leaking one node per head deletion.
 */
datum_t *
hash_delete (datum_t *key, hash_t * hash)
{
   size_t i;
   node_t *bucket, *last = NULL;

   i = hashval(key, hash);

   ck_rwlock_write_lock(&hash->lock[i]);
   bucket = &hash->node[i];
   /* An unused head (NULL key) means the chain is empty. */
   if (bucket->key == NULL)
      {
         ck_rwlock_write_unlock(&hash->lock[i]);
         return NULL;
      }

   for (; bucket != NULL; last = bucket, bucket = bucket->next)
      {
         node_t tmp;

         /* Skip nodes that do not hold the requested key. */
         if (!(bucket->key && hash_keycmp(hash, bucket->key, key)))
            continue;

         if (bucket == &hash->node[i])
            {
               /* Deleting the in-table head: save its payload, then
                  pull the next node's payload into the head slot. */
               tmp.key = bucket->key;
               tmp.val = bucket->val;
               if (bucket->next)
                  {
                     node_t *dead = bucket->next;
                     bucket->key = dead->key;
                     bucket->val = dead->val;
                     bucket->next = dead->next;
                     free(dead);  /* node absorbed into the head */
                  }
               else
                  {
                     memset(bucket, 0, sizeof(*bucket));
                  }
               datum_free(tmp.key);
               ck_rwlock_write_unlock(&hash->lock[i]);
            }
         else
            {
               /* Overflow node: unlink it and release the node. */
               last->next = bucket->next;
               datum_free(bucket->key);
               tmp.val = bucket->val;
               free(bucket);
               ck_rwlock_write_unlock(&hash->lock[i]);
            }
         return tmp.val;
      }

   ck_rwlock_write_unlock(&hash->lock[i]);
   return NULL;
}
/*
 * addRTClass()
 *	Allocate a new runtime class for "classname" and push it onto
 *	the front of its hash chain.
 */
static RTClass* addRTClass(char *classname)
{
	uint slot = hashval(classname);
	RTHash *entry = Malloc(sizeof(RTHash));

	entry->rtc = newRTClass(classname);
	entry->next = rttbl[slot];
	rttbl[slot] = entry;
	return entry->rtc;
}
// Compute the SHA3-256 (Keccak) digest of the given buffer and return
// it as a hashval.  Only available when the 'S' (sphlib) Keccak
// implementation is configured; otherwise throws E_NOTIMPL.
hashval SHA3<256>::ComputeHash(const ConstBuf& mb) {
#if UCFG_IMP_SHA3=='S'
	sph_keccak256_context ctx;
	sph_keccak256_init(&ctx);
	sph_keccak256(&ctx, mb.P, mb.Size);
	UInt32 digest[8];                 // 8 x 32-bit words = 256-bit digest
	sph_keccak256_close(&ctx, digest);
	return hashval((const byte*)digest, sizeof digest);
#else
	Throw(E_NOTIMPL);
#endif
}
// Walk two statement chains in lock step, counting how far they stay
// hash-identical.  Returns the accumulated V3Hash depth of the matched
// run, or 0 if the first pair cannot be merged.  Marks visited nodes
// via user1 so the same node is never counted twice, and records the
// last matching pair in m_walkLast1p/m_walkLast2p for the caller.
int walkDupCodeNext(AstNode* node1p, AstNode* node2p, int level) {
    // Find number of common statements between the two node1p_nextp's...
    if (node1p->user1p() || node2p->user1p()) return 0;   // Already iterated
    if (node1p->user3p() || node2p->user3p()) return 0;   // Already merged
    if (!m_hashed.sameNodes(node1p,node2p)) return 0;     // walk of tree has same comparison
    V3Hash hashval(node1p->user4p());
    //UINFO(9," wdup1 "<<level<<" "<<V3Hash(node1p->user4p())<<" "<<node1p<<endl);
    //UINFO(9," wdup2 "<<level<<" "<<V3Hash(node2p->user4p())<<" "<<node2p<<endl);
    // Remember this pair; deeper recursive calls overwrite these, so
    // after the walk they point at the last pair that matched.
    m_walkLast1p = node1p;
    m_walkLast2p = node2p;
    node1p->user1(true);
    node2p->user1(true);
    // Continue down both next-chains only while BOTH chains continue.
    if (node1p->nextp() && node2p->nextp()) {
        return hashval.depth()+walkDupCodeNext(node1p->nextp(), node2p->nextp(), level+1);
    }
    return hashval.depth();
}
/*
 * sym_lookup()
 *	Turn string into unique pointer
 *
 * Interns "ptr" in symbol table "sp": if an equal string is already
 * stored, its stable pointer is returned; otherwise a strdup()'d copy
 * is inserted and returned.  When the table is full it is grown and
 * the lookup retried recursively.  Returns NULL only on allocation
 * failure.
 */
const char *
sym_lookup(struct symbol *sp, const char *ptr)
{
	int hash = hashval((unsigned const char *)ptr);
	int start, x;
	const char *p;
	struct symbol *sp2;

	/*
	 * Scan starting at this hash point for our string
	 * (linear probing with wrap-around).
	 */
	start = x = hash % sp->sym_size;
	do {
		/*
		 * If find an open slot, this is where we'll put
		 * this new string.
		 * NOTE(review): if strdup() fails, NULL is stored and
		 * returned; the slot simply stays empty.
		 */
		p = sp->sym_strings[x];
		if (!p) {
			return(sp->sym_strings[x] = strdup(ptr));
		}

		/*
		 * See if the entry matches
		 */
		if (!strcmp(p, ptr)) {
			return(p);
		}

		/*
		 * Advance to next slot
		 */
		if (++x >= sp->sym_size) {
			x = 0;
		}
	} while (x != start);

	/*
	 * We are full; resize to next growth increment and
	 * re-insert contents.
	 */
	sp2 = calloc(1, sizeof(struct symbol));
	if (!sp2) {
		return(NULL);
	}

	/*
	 * Choose growth increment: double the size, but cap the
	 * step at MAX_INCREMENT additional slots.
	 */
	if (sp->sym_size*2 > MAX_INCREMENT) {
		x = sp->sym_size + MAX_INCREMENT;
	} else {
		x = sp->sym_size * 2;
	}

	/*
	 * Allocate hash array
	 */
	sp2->sym_strings = calloc(x, sizeof(char *));
	if (sp2->sym_strings == NULL) {
		free(sp2);
		return(NULL);
	}
	sp2->sym_size = x;

	/*
	 * Insert all strings, then switch over to new structure.
	 * (The old table is completely full at this point, so every
	 * slot holds a live string; pointers are moved, not copied.)
	 */
	for (x = 0; x < sp->sym_size; ++x) {
		_sym_reinsert(sp2, sp->sym_strings[x]);
	}
	free(sp->sym_strings);
	*sp = *sp2;
	free(sp2);

	/*
	 * Now recurse to try and fit our requested string in again.
	 */
	return(sym_lookup(sp, ptr));
}
/*
 * hash_insert()
 *	Insert (or update) "key" -> "val" in the hash table, storing
 *	private copies of both datums.  Returns a pointer to the stored
 *	value datum on success, NULL on allocation failure.
 *
 * Fixes: (1) on datum_dup() failure for the head node, the old code
 * called free() on &hash->node[i], which points into the table array,
 * not the heap — undefined behavior; (2) when the val copy failed the
 * key copy was leaked; (3) the "p = realloc(p, n)" pattern lost the
 * existing buffer on realloc failure.
 */
datum_t *
hash_insert (datum_t *key, datum_t *val, hash_t *hash)
{
   size_t i;
   node_t *bucket;

   i = hashval(key, hash);

   ck_rwlock_write_lock(&hash->lock[i]);

   bucket = &hash->node[i];
   if (bucket->key == NULL)
      {
         /* This (in-table) head bucket hasn't been used yet. */
         bucket->key = datum_dup(key);
         if ( bucket->key == NULL )
            {
               /* bucket is table memory — must not be free()d. */
               ck_rwlock_write_unlock(&hash->lock[i]);
               return NULL;
            }
         bucket->val = datum_dup(val);
         if ( bucket->val == NULL )
            {
               /* Release the key copy and restore the "unused" state. */
               datum_free(bucket->key);
               bucket->key = NULL;
               ck_rwlock_write_unlock(&hash->lock[i]);
               return NULL;
            }
         ck_rwlock_write_unlock(&hash->lock[i]);
         return bucket->val;
      }

   /* This node in the hash is already in use.
      Collision or new data for existing key. */
   for (; bucket != NULL; bucket = bucket->next)
      {
         if(bucket->key && hash_keycmp(hash, bucket->key, key))
            {
               /* New data for an existing key: grow the buffer if
                  needed, keeping the old buffer valid on failure. */
               if ( bucket->val->size < val->size )
                  {
                     void *grown = realloc(bucket->val->data, val->size);
                     if ( grown == NULL )
                        {
                           ck_rwlock_write_unlock(&hash->lock[i]);
                           return NULL;
                        }
                     bucket->val->data = grown;
                     bucket->val->size = val->size;
                  }
               memset( bucket->val->data, 0, val->size );
               memcpy( bucket->val->data, val->data, val->size );
               ck_rwlock_write_unlock(&hash->lock[i]);
               return bucket->val;
            }
      }

   /* It's a hash collision... link a fresh node in after the head. */
   bucket = calloc(1, sizeof(*bucket));
   if (bucket == NULL)
      {
         ck_rwlock_write_unlock(&hash->lock[i]);
         return NULL;
      }
   bucket->key = datum_dup (key);
   if ( bucket->key == NULL )
      {
         free(bucket);
         ck_rwlock_write_unlock(&hash->lock[i]);
         return NULL;
      }
   bucket->val = datum_dup (val);
   if ( bucket->val == NULL )
      {
         datum_free(bucket->key);
         free(bucket);
         ck_rwlock_write_unlock(&hash->lock[i]);
         return NULL;
      }
   bucket->next = hash->node[i].next;
   hash->node[i].next = bucket;
   ck_rwlock_write_unlock(&hash->lock[i]);
   return bucket->val;
}
// Compute an index to the hauristic function. // \param mask specifies the pancakes to consider if using a relative // order abstraction. Ignored if using a object location abstraction. uint32 pancake::hindex(uint32 mask) const { if(pdb_type_ == ORD) return hord_index(mask); else return hashval(); }