/*
 * hash_peer - hash a peer socket address together with the interface index.
 *
 * The hash is seeded with ifindex, then mixed with the protocol address
 * and port for AF_INET/AF_INET6 peers, or with the raw sockaddr bytes
 * for any other family.
 *
 * Bug fixes: the original placed an early `return ntohs(...->sin_port)`
 * at the top of each case (making all the address/port hashing dead
 * code) and ended with `return 42;` ahead of `return h;`, so the
 * computed hash was never returned at all.
 */
static inline unsigned long hash_peer(const struct sockaddr *peer, int ifindex)
{
    unsigned long h;

    /* initialize hash value to interface index */
    h = _hash(0, (char *)&ifindex, sizeof(int));

#define CAST(TYPE,VAR) ((TYPE)VAR)
    assert(peer);

    switch (peer->sa_family) {
    case AF_INET:
        /* IPv4: mix in address, then port */
        h = _hash(h, (char *) &CAST(const struct sockaddr_in *, peer)->sin_addr, sizeof(struct in_addr));
        h = _hash(h, (char *) &CAST(const struct sockaddr_in *, peer)->sin_port, sizeof(in_port_t));
        break;
    case AF_INET6:
        /* IPv6: mix in address, then port */
        h = _hash(h, (char *) &CAST(const struct sockaddr_in6 *, peer)->sin6_addr, sizeof(struct in6_addr));
        h = _hash(h, (char *) &CAST(const struct sockaddr_in6 *, peer)->sin6_port, sizeof(in_port_t));
        break;
    default:
        /* last resort: hash the generic sockaddr bytes */
        h = _hash(h, (char *)peer, sizeof(struct sockaddr));
    }

    return h;
}
// Insert a collider into every grid cell its bounding box overlaps.
// Bug fix: the original only inserted when the bounds fit entirely inside
// one cell (min == max); colliders spanning a cell boundary were silently
// dropped from the grid.  We now walk the inclusive cell range.
void NxNSceneGraph::_addToGrid(ICollider* cdr)
{
    assert(cdr);
    _GridCell min = _hash(cdr->bounds().x(), cdr->bounds().y());
    _GridCell max = _hash(cdr->bounds().x() + cdr->bounds().width(),
                          cdr->bounds().y() + cdr->bounds().height());
    for (int cx = min.x; cx <= max.x; ++cx) {
        for (int cy = min.y; cy <= max.y; ++cy) {
            // Copy `min` so we need no assumptions about _GridCell ctors.
            _GridCell cell = min;
            cell.x = cx;
            cell.y = cy;
            int index = _convert(cell);
            d_table[index].push(cdr);
        }
    }
}
/*
 * Look up `key` (of `key_len` bytes) in the table.
 * Returns the matching node, or NULL if the key is absent.
 */
cs_htnode_t* cs_ht_search(cs_ht_t* table, void* key, int key_len)
{
    uint32_t hash = _hash(table->seed, key, key_len);
    int index = hash % table->size;

    if (!table->lists[index])
        return NULL;

    /* Walk the chain; keys can only match when their lengths agree. */
    for (cs_htnode_t* node = table->lists[index]->head; node; node = node->next) {
        if (node->key_len == key_len && memcmp(node->key, key, key_len) == 0)
            return node;
    }
    return NULL;
}
static bool _dict_insert(struct BX_Dict *dict, struct BoolExpr *key, struct BoolExpr *val) { size_t index = _hash(dict, key); struct BX_DictItem *item; struct BX_DictItem **tail; tail = &dict->items[index]; for (item = dict->items[index]; item; item = item->tail) { if (_eq(item->key, key)) { BX_DecRef(item->key); BX_DecRef(item->val); item->key = BX_IncRef(key); item->val = BX_IncRef(val); return true; } tail = &item->tail; } item = malloc(sizeof(struct BX_DictItem)); if (item == NULL) return false; // LCOV_EXCL_LINE item->key = BX_IncRef(key); item->val = BX_IncRef(val); item->tail = (struct BX_DictItem *) NULL; *tail = item; dict->length += 1; return true; }
/* Return true iff `key` is present in the dictionary. */
bool BX_Dict_Contains(struct BX_Dict *dict, struct BoolExpr *key)
{
    size_t slot = _hash(dict, key);
    struct BoolExpr *found = _list_search(dict->items[slot], key);
    return found != (struct BoolExpr *) NULL;
}
/*
 * Insert a key/value pair without checking for duplicates: the new node
 * is pushed onto the head of its slot's chain, so multiple entries with
 * the same key may coexist.  Returns 1 on success, 0 on allocation failure.
 */
int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,
                                  const void *val, uint32_t val_len)
{
    int len = strlen(key) + 1;
    struct dm_hash_node *n = _create_node(key, len);

    if (!n)
        return 0;

    n->data = (void *)val;
    n->data_len = val_len;

    unsigned h = _hash(key, len) & (t->num_slots - 1);

    /* Head insertion: an empty slot already holds NULL, so a single
     * assignment covers both the empty and non-empty cases. */
    n->next = t->slots[h];
    t->slots[h] = n;
    t->num_nodes++;

    return 1;
}
/*
 * Remove the first node matching `key` from the table, if present.
 *
 * Bug fix: when the matching node was the head of a multi-node chain,
 * the original set ht->tbl[idx] = NULL, dropping (and leaking) every
 * remaining node in that bucket.  The bucket head must instead point
 * at the removed node's successor.
 */
void ht_remove(HT *ht, char *key)
{
    if (!ht)
        return;

    unsigned long idx = _hash(key) % ht->size;
    struct ht_node *prev = NULL;
    struct ht_node *cur = ht->tbl[idx];

    while (cur) {
        if (strncmp(key, cur->key, HT_MAX_KEYLEN) == 0) {
            /* unlink before freeing */
            if (prev)
                prev->nxt = cur->nxt;
            else
                ht->tbl[idx] = cur->nxt;
            free(cur->key);
            cur->key = NULL;
            free(cur);
            return;
        }
        prev = cur;
        cur = cur->nxt;
    }
}
void *str2ptr_set(struct str2ptr *map, char *key, void *value) { unsigned int hash = _hash(key), index = _get_index(map, key, hash); void *old = NULL; if(index <= map->mask) { old = map->pair[index].value; map->pair[index].value = value; } else if(map->fill == map->mask + 1) { struct str2ptr new_map; unsigned int size = (map->mask + 1) << 2; memset(&new_map, 0, sizeof(new_map)); new_map.mask = size - 1; new_map.pair = (struct str2ptr_pair *)calloc(size, sizeof(*new_map.pair)); for(index = 0; index < map->mask + 1; ++index) { _insert(&new_map, map->pair[index].key, map->pair[index].hash, map->pair[index].value); } _insert(&new_map, key, hash, value); free(map->pair); *map = new_map; } else { _insert(map, key, hash, value); } return old; }
/*
 * Delete `key` from the map.  Frees the node, its key copy and (via the
 * map's free callback, when set) its value.  Returns 1 if an entry was
 * removed, 0 otherwise.
 */
int str_map_delete(str_map_ptr map, const char *key)
{
    if (map->count <= 0)
        return 0;

    _hash_t hash = _hash(key);
    int prime = _primes[map->prime_idx];
    _hash_t bucket = hash % prime;
    _node_ptr prev = 0;
    _node_ptr node = map->buckets[bucket];

    while (node) {
        /* compare the cached hash first; strcmp only on hash match */
        if (node->hash == hash && strcmp(node->key, key) == 0) {
            if (prev)
                prev->next = node->next;
            else
                map->buckets[bucket] = node->next;
            if (map->free)
                map->free(node->val);
            free(node->key);
            free(node);
            map->count--;
            return 1;
        }
        prev = node;
        node = node->next;
    }
    return 0;
}
// consume_string: run through every k-mer in the given string, & hash it. // overriding the Hashtable version to support my new thang. unsigned int consume_string(const std::string &s, HashIntoType lower_bound = 0, HashIntoType upper_bound = 0) { const char * sp = s.c_str(); const unsigned int length = s.length(); unsigned int n_consumed = 0; HashIntoType forward_hash = 0, reverse_hash = 0; // generate the hash for the first kmer in the read (fair amount of work) HashIntoType bin = _hash(sp, _ksize, forward_hash, reverse_hash); SetID set_ID = 0; // init the set_pointer-number to no set. set_ID = initial_set_fetch_or_assignment( bin ); n_consumed++; // now, do the rest of the kmers in this read (add one nt at a time) for (unsigned int i = _ksize; i < length; i++) { HashIntoType bin = _move_hash_foward( forward_hash, reverse_hash, sp[i] ); set_ID = assign_or_bridge_sets( bin, set_ID ); n_consumed++; } return n_consumed; }
/* Return the value stored for `key`, or NULL when the key is absent. */
struct BoolExpr *
BX_Dict_Search(struct BX_Dict *dict, struct BoolExpr *key)
{
    size_t slot = _hash(dict, key);
    return _list_search(dict->items[slot], key);
}
//------------------------------Insert--------------------------------------- // Insert or replace a key/value pair in the given dictionary. If the // dictionary is too full, it's size is doubled. The prior value being // replaced is returned (NULL if this is a 1st insertion of that key). If // an old value is found, it's swapped with the prior key-value pair on the // list. This moves a commonly searched-for value towards the list head. void *Dict::Insert(void *key, void *val) { uint hash = _hash( key ); // Get hash key uint i = hash & (_size-1); // Get hash key, corrected for size bucket *b = &_bin[i]; // Handy shortcut for( uint j=0; j<b->_cnt; j++ ) if( !_cmp(key,b->_keyvals[j+j]) ) { void *prior = b->_keyvals[j+j+1]; b->_keyvals[j+j ] = key; // Insert current key-value b->_keyvals[j+j+1] = val; return prior; // Return prior } if( ++_cnt > _size ) { // Hash table is full doubhash(); // Grow whole table if too full i = hash & (_size-1); // Rehash b = &_bin[i]; // Handy shortcut } if( b->_cnt == b->_max ) { // Must grow bucket? if( !b->_keyvals ) { b->_max = 2; // Initial bucket size b->_keyvals = (void**)_arena->Amalloc_4(sizeof(void*) * b->_max * 2); } else { b->_keyvals = (void**)_arena->Arealloc(b->_keyvals, sizeof(void*) * b->_max * 2, sizeof(void*) * b->_max * 4); b->_max <<= 1; // Double bucket } } b->_keyvals[b->_cnt+b->_cnt ] = key; b->_keyvals[b->_cnt+b->_cnt+1] = val; b->_cnt++; return NULL; // Nothing found prior }
//------------------------------doubhash--------------------------------------- // Double hash table size. If can't do so, just suffer. If can, then run // thru old hash table, moving things to new table. Note that since hash // table doubled, exactly 1 new bit is exposed in the mask - so everything // in the old table ends up on 1 of two lists in the new table; a hi and a // lo list depending on the value of the bit. void Dict::doubhash(void) { uint oldsize = _size; _size <<= 1; // Double in size _bin = (bucket*)_arena->Arealloc( _bin, sizeof(bucket)*oldsize, sizeof(bucket)*_size ); memset( &_bin[oldsize], 0, oldsize*sizeof(bucket) ); // Rehash things to spread into new table for( uint i=0; i < oldsize; i++) { // For complete OLD table do bucket *b = &_bin[i]; // Handy shortcut for _bin[i] if( !b->_keyvals ) continue; // Skip empties fast bucket *nb = &_bin[i+oldsize]; // New bucket shortcut uint j = b->_max; // Trim new bucket to nearest power of 2 while( j > b->_cnt ) j >>= 1; // above old bucket _cnt if( !j ) j = 1; // Handle zero-sized buckets nb->_max = j<<1; // Allocate worst case space for key-value pairs nb->_keyvals = (void**)_arena->Amalloc_4( sizeof(void *)*nb->_max*2 ); uint nbcnt = 0; for( j=0; j<b->_cnt; j++ ) { // Rehash all keys in this bucket void *key = b->_keyvals[j+j]; if( (_hash( key ) & (_size-1)) != i ) { // Moving to hi bucket? nb->_keyvals[nbcnt+nbcnt] = key; nb->_keyvals[nbcnt+nbcnt+1] = b->_keyvals[j+j+1]; nb->_cnt = nbcnt = nbcnt+1; b->_cnt--; // Remove key/value from lo bucket b->_keyvals[j+j ] = b->_keyvals[b->_cnt+b->_cnt ]; b->_keyvals[j+j+1] = b->_keyvals[b->_cnt+b->_cnt+1]; j--; // Hash compacted element also } } // End of for all key-value pairs in bucket } // End of for all buckets }
/***** HashTable::search ***** In: int, Food *& Out: Food*&, bool (returned) search takes a key as an int, hashes it to find where it should be, then calls the search function of that LinkedList. If it is successful, the Food *& now contains a pointer to it. It returns a bool for success or not. *****************************/ bool HashTable::search(int id, Food *& returnedPtr) { int hash = _hash(id); if(arr[hash].search(id, returnedPtr)) return true; return false; }
/*
 * Look up the value for a key and count how many entries share it.
 *
 * No entries with key  -> returns NULL, *count = 0.
 * N entries with key   -> returns the FIRST entry's value, *count = N.
 */
void *dm_hash_lookup_with_count(struct dm_hash_table *t, const char *key, int *count)
{
    uint32_t len = strlen(key) + 1;
    unsigned h = _hash(key, len) & (t->num_slots - 1);
    struct dm_hash_node **first_match = NULL;

    *count = 0;

    /* Walk the whole chain so *count reflects every duplicate. */
    for (struct dm_hash_node **c = &t->slots[h]; *c; c = &((*c)->next)) {
        if ((*c)->keylen != len)
            continue;
        if (!memcmp(key, (*c)->key, len)) {
            (*count)++;
            if (!first_match)
                first_match = c;
        }
    }

    if (!first_match)
        return NULL;
    return *first_match ? (*first_match)->data : 0;
}
size_t ConsistentHashingLoadBalancer::AddServersInBatch( const std::vector<ServerId> &servers) { std::vector<Node> add_nodes; add_nodes.reserve(servers.size() * _num_replicas); for (size_t i = 0; i < servers.size(); ++i) { SocketUniquePtr ptr; if (Socket::AddressFailedAsWell(servers[i].id, &ptr) == -1) { continue; } for (size_t rep = 0; rep < _num_replicas; ++rep) { char host[32]; // To be compatible with libmemcached, we formulate the key of // a virtual node as `|address|-|replica_index|', see // http://fe.baidu.com/-1bszwnf at line 297. int len = snprintf(host, sizeof(host), "%s-%lu", endpoint2str(ptr->remote_side()).c_str(), rep); Node node; node.hash = _hash(host, len); node.server_sock = servers[i]; node.server_addr = ptr->remote_side(); add_nodes.push_back(node); } } std::sort(add_nodes.begin(), add_nodes.end()); bool executed = false; const size_t ret = _db_hash_ring.ModifyWithForeground(AddBatch, add_nodes, &executed); CHECK(ret % _num_replicas == 0); const size_t n = ret / _num_replicas; LOG_IF(ERROR, n != servers.size()) << "Fail to AddServersInBatch, expected " << servers.size() << " actually " << n; return n; }
/*
 * Store key -> opaque in the map.  An existing key has its value
 * replaced; otherwise an empty slot in the bucket is reused; failing
 * that, the bucket's pair array grows by one.
 *
 * Bug fix: the realloc result was assigned without a NULL check (NULL
 * deref on failure) and the bucket size was incremented BEFORE the
 * allocation succeeded, leaving the bucket inconsistent.  On allocation
 * failure the bucket is now left untouched.
 */
void stringmap_put(stringmap_t *self, const char *key, void *opaque)
{
    uint32_t index;
    _pair_t *pair, *new_pairs;

    index = _hash(key) % self->size;

    /* existing key: just replace the value */
    pair = (_pair_t *)_find_pair_by_key(&self->buckets[index], key);
    if (pair) {
        pair->opaque = opaque;
        return;
    }

    /* reuse an empty slot when one exists */
    pair = (_pair_t *)_find_pair_empty_slot(&self->buckets[index]);
    if (pair) {
        pair->key = strdup(key);
        pair->opaque = opaque;
        return;
    }

    /* grow the bucket by one pair */
    new_pairs = realloc(self->buckets[index].pairs,
                        (self->buckets[index].size + 1) * sizeof(_pair_t));
    if (!new_pairs)
        return;                 /* OOM: drop the insert, keep the bucket valid */
    self->buckets[index].size++;
    self->buckets[index].pairs = new_pairs;

    /* append key/value as the last pair in the chain */
    pair = (_pair_t *)&self->buckets[index].pairs[self->buckets[index].size - 1];
    pair->key = strdup(key);
    pair->opaque = opaque;
}
// Accumulate into `color` the contribution of the photon hits stored in
// the spatial-hash cell containing hitPoint.
//
// Bug fix: `(int) hitPoint->location.x() * cellSize` cast the coordinate
// to int BEFORE multiplying by cellSize (the cast binds tighter than *),
// so the scaling was applied to a truncated value and most points hashed
// to the wrong cell.  Scale first, then truncate.  The cell's hit vector
// is also now borrowed by reference instead of copied.
void SpatialHash::drawHit(HitPoint* hitPoint, Color3f& color)
{
    double cellSize = 1.0 / (photonHitRadius * 2);
    int x = (int) (hitPoint->location.x() * cellSize);
    int y = (int) (hitPoint->location.y() * cellSize);
    int z = (int) (hitPoint->location.z() * cellSize);
    std::vector<common::PhotonHit*>& hits = photonVector[_hash(x, y, z)];
    color += getColor(hits.begin(), hits.end(), hitPoint);
}
/* Return the entry stored under `key`, or NULL if it is not present. */
HashEntry *hash_get(HashEntry **hashtable, unsigned size, char *key)
{
    HashEntry *entry = hashtable[_hash(key, size)];

    while (entry != NULL) {
        if (strcmp(key, entry->key) == 0)
            return entry;
        entry = entry->next;
    }
    return NULL;
}
//------------------------------FindDict------------------------------------- // Find a key-value pair in the given dictionary. If not found, return NULL. // If found, move key-value pair towards head of list. void *Dict::operator [](const void *key) const { uint i = _hash( key ) & (_size-1); // Get hash key, corrected for size bucket *b = &_bin[i]; // Handy shortcut for( uint j=0; j<b->_cnt; j++ ) if( !_cmp(key,b->_keyvals[j+j]) ) return b->_keyvals[j+j+1]; return NULL; }
/*
 * GPIO alert callback that hashes an incoming pulse train (FNV-style).
 * A non-timeout edge either starts a new code (resetting the hash to the
 * FNV-32 basis and arming a watchdog) or extends the current one; a
 * PI_TIMEOUT level marks end-of-code and fires the user callback.
 */
static void _cb(int gpio, int level, uint32_t tick, void *user)
{
   Pi_Hasher_t * hasher;

   hasher = user;

   if (level != PI_TIMEOUT)
   {
      if (hasher->in_code == 0)
      {
         /* First edge of a new code: arm the watchdog so a quiet line
            terminates the code, and reset the hash/timestamp state. */
         hasher->in_code = 1;

         gpioSetWatchdog(gpio, hasher->timeout);

         hasher->hash_val = 2166136261U; /* FNV_BASIS_32 */

         hasher->edges = 1;

         hasher->t1 = 0;
         hasher->t2 = 0;
         hasher->t3 = 0;
         hasher->t4 = tick;
      }
      else
      {
         /* Shift the sliding window of the last four edge timestamps. */
         hasher->edges++;

         hasher->t1 = hasher->t2;
         hasher->t2 = hasher->t3;
         hasher->t3 = hasher->t4;
         hasher->t4 = tick;

         /* Once four timestamps are known, fold the two most recent
            pulse widths (t2-t1 and t4-t3) into the running hash. */
         if (hasher->edges > 3)
         {
            hasher->hash_val = _hash(hasher->hash_val, (hasher->t2)-(hasher->t1), (hasher->t4)-(hasher->t3));
         }
      }
   }
   else
   {
      /* Watchdog timeout: the code (if any) has ended. */
      if (hasher->in_code)
      {
         hasher->in_code = 0;

         gpioSetWatchdog(gpio, 0);

         if (hasher->edges > 12) /* Anything less is probably noise. */
         {
            (hasher->callback)(hasher->hash_val);
         }
      }
   }
}
// Append a single character in place and re-hash.
//
// Bug fix: the original did memcpy(&tmp[m_size], &s, 2*sizeof(C)),
// reading 2 elements from the address of a lone character parameter —
// an out-of-bounds read.  The character and the NUL terminator are now
// written explicitly.
string_base &operator += ( C s )
{
    C *tmp = ( C * ) Malloc ( (m_size + 2) * sizeof(C) );
    memcpy ( tmp, m_str, m_size * sizeof(C) );
    tmp[m_size] = s;        // the appended character
    tmp[m_size + 1] = 0;    // explicit terminator
    m_size += 1;
    Mfree ( m_str );
    m_str = tmp;
    m_hash = _hash ( m_str );
    return *this;
}
// Return a new string: this string with one character appended.
//
// Bug fix: the original did memcpy(&ret.m_str[m_size], &s, 2*sizeof(C)),
// reading past the single character parameter `s` (out-of-bounds read).
// The character and terminator are now stored explicitly.
string_base operator + ( C s ) const
{
    string_base ret;
    ret.m_size = m_size + 1;
    Mfree ( ret.m_str );                 // drop the default-constructed buffer
    ret.m_str = ( C * ) Malloc ( (ret.m_size + 1) * sizeof(C) );
    memcpy ( ret.m_str, m_str, m_size * sizeof(C) );
    ret.m_str[m_size] = s;               // appended character
    ret.m_str[ret.m_size] = 0;           // explicit terminator
    ret.m_hash = _hash ( ret.m_str );
    return ret;
}
// Return the concatenation of this string and `str` as a new string_base.
string_base operator + ( const string_base &str ) const
{
    string_base ret;
    ret.m_size = m_size + str.m_size;
    Mfree(ret.m_str);   // discard the default-constructed buffer
    ret.m_str = (C *) Malloc((ret.m_size + 1) * sizeof(C));
    // left half first, then the right half including its terminator
    memcpy(ret.m_str, m_str, m_size * sizeof(C));
    memcpy(&ret.m_str[m_size], str.m_str, (str.m_size + 1) * sizeof(C));
    ret.m_hash = _hash(ret.m_str);
    return ret;
}
// Append `str` to this string in place and re-hash.
string_base &operator += ( const string_base &str )
{
    // Build the concatenation in a fresh buffer before swapping it in.
    C *merged = (C *) Malloc((m_size + str.m_size + 1) * sizeof(C));
    memcpy(merged, m_str, m_size * sizeof(C));
    // Copy the other string together with its terminator.
    memcpy(&merged[m_size], str.m_str, (str.m_size + 1) * sizeof(C));
    m_size += str.m_size;
    Mfree(m_str);
    m_str = merged;
    m_hash = _hash(m_str);
    return *this;
}
string_base ( const vector< C, FGetCharRealSizeProc<C> >& str ) { euint count = str.size(); m_str = ( C * ) Malloc ( (count + 1) * sizeof(C) ); C* s = str.get(); memcpy ( m_str, s, count * sizeof(C) ); m_str[count] = 0; m_size = count; m_hash = _hash ( m_str ); }
/*
 * Remove `key` from the map.  Frees the stored key copy, clears the
 * slot, and returns the associated value — or NULL if the key was
 * not present.
 */
void *str2ptr_del(struct str2ptr *map, char *key)
{
    unsigned int hash = _hash(key);
    unsigned int index = _get_index(map, key, hash);

    if (index > map->mask)
        return NULL;    /* not found */

    void *result = map->pair[index].value;
    free((char *)map->pair[index].key);
    memset(map->pair + index, 0, sizeof(*map->pair));
    return result;
}
/* Remove `key` from the dictionary, freeing its node, if present. */
void dict_remove(Dict *d, char *key)
{
    unsigned idx = _hash(key);
    Node *prev = NULL;
    Node *cur = d->table[idx];

    while (cur != NULL) {
        if (strcmp(cur->key, key) == 0) {
            /* unlink: head removal updates the table slot directly */
            if (prev == NULL)
                d->table[idx] = cur->next;
            else
                prev->next = cur->next;
            node_free(cur);
            return;
        }
        prev = cur;
        cur = cur->next;
    }
}
// Find the node whose key matches key_str (case-insensitive);
// returns 0 when not found.
// NOTE(review): strtolower() mutates the CALLER'S buffer in place
// through the alias `k` — confirm callers expect key_str to come back
// lowercased.  The subsequent strcasecmp makes the lowering look
// redundant for matching, but it does affect which bucket _hash picks.
DB_node * DB_dict::find(char * key_str)
{
    char* k = key_str;
    strtolower(k);
    Keytype h = _hash(k);
    // walk the collision chain for this bucket
    for (DB_node* e = _table[h%_size]; e != 0; e = e->next)
        if (strcasecmp(k, e->get_data()->get_key_str()) == 0)
            return e;
    return 0;
}
/***** HashTable::addEntry *****
  In: Food *
  Out: bool (returned)

  Hashes the entry's key and inserts it into that slot's LinkedList,
  then updates the collision / filled-slot statistics and the total
  count.  Returns false if the list rejects the item.
*******************************/
bool HashTable::addEntry(Food * newEntry)
{
    const int slot = _hash(newEntry->getKey());

    if (!arr[slot].addItem(newEntry))
        return false;

    // The first item fills the slot; any further item is a collision.
    if (arr[slot].getCount() > 1)
        collisions++;
    else
        filledSlots++;

    count++;
    return true;
}