/* Grow the hash table when its load demands it.
 *
 * An empty table is grown straight to HT_INITIAL_SIZE; once the table is
 * at least half full (used * 2 >= size), the bucket count is doubled.
 * Returns whatever ht_expand() returns, or HT_OK when no growth is needed.
 */
static int
ht_expand_if_needed(struct hashtable *t)
{
	/* First insertion into an empty table: allocate the initial buckets. */
	if (t->size == 0)
		return ht_expand(t, HT_INITIAL_SIZE);

	/* Half full or worse: double the table to keep chains short. */
	if (t->used * 2 >= t->size)
		return ht_expand(t, t->size * 2);

	return HT_OK;
}
/* Shrink (or grow) the table to the smallest size that can hold all the
 * elements currently stored, never going below HT_INITIAL_SIZE.
 * Returns the result of ht_expand().
 */
int
ht_resize(struct hashtable *t)
{
	int target = t->used * 2 + 1;

	if (target < HT_INITIAL_SIZE)
		target = HT_INITIAL_SIZE;
	return ht_expand(t, target);
}
/*
 * Store (key, value) into the hash table.
 *
 * key_len == -1 means key is a NUL-terminated string; otherwise exactly
 * key_len bytes of key are copied and NUL-terminated.  The key is always
 * duplicated; ownership of value stays with the caller's contract
 * (NOTE(review): value is stored by pointer, not copied — confirm who
 * frees it).  Returns 0 on success, -1 on error.
 *
 * Fix: both key-allocation failure paths previously returned without
 * releasing the freshly allocated entry, leaking it.
 */
int
exmpp_ht_store(struct exmpp_hashtable *ht, const char *key, int key_len,
    void *value)
{
	unsigned int index;
	struct exmpp_ht_entry *entry;

	if (ht == NULL || ht->entries == NULL)
		return (-1);

	/* Allocate the new entry. */
	entry = driver_alloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);
	if (key_len == -1) {
		/* NUL-terminated key: hash it and take a private copy. */
		entry->hash = ht_hash(key);
		entry->key = exmpp_strdup(key);
		if (entry->key == NULL) {
			driver_free(entry); /* was leaked before this fix */
			return (-1);
		}
	} else {
		/* Counted key: copy key_len bytes and NUL-terminate. */
		entry->hash = ht_hash_len(key, key_len);
		entry->key = driver_alloc(key_len + 1);
		if (entry->key == NULL) {
			driver_free(entry); /* was leaked before this fix */
			return (-1);
		}
		memcpy(entry->key, key, key_len);
		entry->key[key_len] = '\0';
	}
	entry->key_len = key_len;
	entry->value = value;

#if defined(USE_RWLOCK)
	erl_drv_rwlock_rwlock(ht->lock);
#endif

	/* Expand the table if necessary. */
	if (++(ht->entries_count) > ht->load_limit) {
		/*
		 * Ignore the return value. If expand fails, we should
		 * still try cramming just this value into the existing
		 * table -- we may not have memory for a larger table,
		 * but one more element may be ok. Next time we insert,
		 * we'll try expanding again.
		 */
		ht_expand(ht);
	}

	/* Wire the new entry at the head of its bucket's chain. */
	index = entry->hash % ht->length;
	entry->next = ht->entries[index];
	ht->entries[index] = entry;

#if defined(USE_RWLOCK)
	erl_drv_rwlock_rwunlock(ht->lock);
#endif

	return (0);
}
/* Add a (key, value) pair to the hash table, growing it first when full.
 * Returns 0 on success, -1 if either the expansion or the insertion fails.
 */
int
ht_add(Hashtable *ht, const char *key, int key_size, void *value)
{
	/* Full table: double before inserting; bail out if that fails. */
	if (ht->count >= ht->capacity && ht_expand(ht, ht->count * 2) != 0)
		return -1;

	if (table_add_item(ht->table, ht->real_capacity, key, key_size,
	    value, ht->ignore_case) != 0)
		return -1;

	ht->count++;
	return 0;
}
/* Standard thing: hash into ht. At some point we would rehash.
 * Inserts the atom described by ir into a's hash table if no equal entry
 * already exists, chaining at the head of the bucket and updating the
 * collision/entry counters.  Triggers ht_expand when collisions exist and
 * occupancy (ent * 100 >> log2_sz) reaches DPF_HT_USAGE.
 * NOTE(review): this function is truncated in this view — the body after
 * the final `else if(ht_regenp(ht))` is not visible here.  */
static Atom
ht_insert(Atom a, int pid, struct ir *ir)
{
	Atom hte, *p;
	Ht ht;

	ht = a->ht;
	/* Only insert when an equal entry is not already present. */
	if (!(hte = ht_lookup(ht, ir->u.eq.val, 0, 0))) {
		hte = mkatom(a, pid, *ir);
		LIST_INSERT_HEAD(&a->kids, hte, sibs);
		p = &ht->ht[hash(ht, ir->u.eq.val)];
		/* collision */
		if ((hte->next = *p))
			++ht->coll;
		*p = hte;
		ht->ent++;
		/* Grow if possible */
		if (ht->coll && (ht->ent * 100 >> ht->log2_sz) >= DPF_HT_USAGE) {
			ht_expand(a);
			ht->state = DPF_REGEN;
		} else if (ht_regenp(ht))
/* Look up the string STR of length LEN in TABLE, given its precomputed
   HASH.  Uses open addressing with a secondary hash (double hashing);
   TABLE->nslots must be a power of two for the masking to work.

   INSERT controls what happens on a miss:
     HT_NO_INSERT - return NULL;
     HT_ALLOC     - insert, copying the string onto the table's obstack;
     HT_ALLOCED   - insert, taking ownership of STR, which the caller has
		    already placed at the end of the table's obstack (on a
		    hit the duplicate copy is released).
   Returns the (possibly new) node.  Grows the table once it is 3/4 full. */
hashnode
ht_lookup_with_hash (hash_table *table, const unsigned char *str, size_t len,
		     unsigned int hash, enum ht_lookup_option insert)
{
  unsigned int hash2;
  unsigned int index;
  size_t sizemask;
  hashnode node;

  sizemask = table->nslots - 1;
  index = hash & sizemask;
  table->searches++;

  node = table->entries[index];
  if (node != NULL)
    {
      /* Hit on the first probe? */
      if (node->hash_value == hash
	  && HT_LEN (node) == (unsigned int) len
	  && !memcmp (HT_STR (node), str, len))
	{
	  if (insert == HT_ALLOCED)
	    /* The string we search for was placed at the end of the
	       obstack.  Release it.  */
	    obstack_free (&table->stack, (void *) str);
	  return node;
	}

      /* hash2 must be odd, so we're guaranteed to visit every possible
	 location in the table during rehashing.  */
      hash2 = ((hash * 17) & sizemask) | 1;

      for (;;)
	{
	  table->collisions++;
	  index = (index + hash2) & sizemask;
	  node = table->entries[index];
	  if (node == NULL)
	    /* Empty slot: the string is not in the table. */
	    break;

	  if (node->hash_value == hash
	      && HT_LEN (node) == (unsigned int) len
	      && !memcmp (HT_STR (node), str, len))
	    {
	      if (insert == HT_ALLOCED)
		/* The string we search for was placed at the end of the
		   obstack.  Release it.  */
		obstack_free (&table->stack, (void *) str);
	      return node;
	    }
	}
    }

  if (insert == HT_NO_INSERT)
    return NULL;

  /* Miss: create a new node in the empty slot the probe stopped at. */
  node = (*table->alloc_node) (table);
  table->entries[index] = node;
  HT_LEN (node) = (unsigned int) len;
  node->hash_value = hash;
  if (insert == HT_ALLOC)
    HT_STR (node) = (const unsigned char *) obstack_copy0 (&table->stack,
							   str, len);
  else
    /* HT_ALLOCED: STR already lives on the obstack; keep it as-is. */
    HT_STR (node) = str;

  if (++table->nelements * 4 >= table->nslots * 3)
    /* Must expand the string table.  */
    ht_expand (table);

  return node;
}
/* Look up the string STR of length LEN in TABLE, given its precomputed
   HASH.  Open addressing with a secondary hash; TABLE->nslots must be a
   power of two.  Slots may hold the DELETED sentinel left by removals.

   If the string is absent and INSERT is HT_NO_INSERT, return NULL;
   otherwise insert it — preferring the first DELETED slot seen on the
   probe path, so probe chains stay short — and return the new node.
   The string is copied via alloc_subobject when available, else onto the
   table's obstack.  Grows the table once it is 3/4 full.

   Fix: the inner-loop deleted-slot test was inverted
   (`deleted_index != table->nslots`): it never recorded a deleted slot
   when none had been seen yet, and overwrote the first-seen slot with
   later ones — contradicting the "first deleted slot" policy.  */
hashnode
ht_lookup_with_hash (cpp_hash_table *table, const unsigned char *str,
		     size_t len, unsigned int hash,
		     enum ht_lookup_option insert)
{
  unsigned int hash2;
  unsigned int index;
  unsigned int deleted_index = table->nslots;
  size_t sizemask;
  hashnode node;

  sizemask = table->nslots - 1;
  index = hash & sizemask;
  table->searches++;

  node = table->entries[index];
  if (node != NULL)
    {
      if (node == DELETED)
	deleted_index = index;
      else if (node->hash_value == hash
	       && HT_LEN (node) == (unsigned int) len
	       && !memcmp (HT_STR (node), str, len))
	return node;

      /* hash2 must be odd, so we're guaranteed to visit every possible
	 location in the table during rehashing.  */
      hash2 = ((hash * 17) & sizemask) | 1;

      for (;;)
	{
	  table->collisions++;
	  index = (index + hash2) & sizemask;
	  node = table->entries[index];
	  if (node == NULL)
	    break;

	  if (node == DELETED)
	    {
	      /* Record only the FIRST deleted slot on the probe path
		 (was `!=`, which inverted the policy).  */
	      if (deleted_index == table->nslots)
		deleted_index = index;
	    }
	  else if (node->hash_value == hash
		   && HT_LEN (node) == (unsigned int) len
		   && !memcmp (HT_STR (node), str, len))
	    return node;
	}
    }

  if (insert == HT_NO_INSERT)
    return NULL;

  /* We prefer to overwrite the first deleted slot we saw.  */
  if (deleted_index != table->nslots)
    index = deleted_index;

  node = (*table->alloc_node) (table);
  table->entries[index] = node;
  HT_LEN (node) = (unsigned int) len;
  node->hash_value = hash;

  if (table->alloc_subobject)
    {
      char *chars = (char *) table->alloc_subobject (len + 1);
      memcpy (chars, str, len);
      chars[len] = '\0';
      HT_STR (node) = (const unsigned char *) chars;
    }
  else
    HT_STR (node) = (const unsigned char *) obstack_copy0 (&table->stack,
							   str, len);

  if (++table->nelements * 4 >= table->nslots * 3)
    /* Must expand the string table.  */
    ht_expand (table);

  return node;
}