void Dictionary::reorder_dictionary() { // Copy all the dictionary entries into a single master list. DictionaryEntry* master_list = NULL; for (int i = 0; i < table_size(); ++i) { DictionaryEntry* p = bucket(i); while (p != NULL) { DictionaryEntry* tmp; tmp = p->next(); p->set_next(master_list); master_list = p; p = tmp; } set_entry(i, NULL); } // Add the dictionary entries back to the list in the correct buckets. while (master_list != NULL) { DictionaryEntry* p = master_list; master_list = master_list->next(); p->set_next(NULL); Symbol* class_name = InstanceKlass::cast((Klass*)(p->klass()))->name(); // Since the null class loader data isn't copied to the CDS archive, // compute the hash with NULL for loader data. unsigned int hash = compute_hash(class_name, NULL); int index = hash_to_index(hash); p->set_hash(hash); p->set_loader_data(NULL); // loader_data isn't copied to CDS p->set_next(bucket(index)); set_entry(index, p); } }
/* Add OBJ to SET (robin-hood probing), unless an equivalent entry already
   exists, in which case that entry's key is returned.
   NOTE(review): the tail of this function — the code after the probe loop
   that actually stores the new entry — is not visible in this chunk; only
   the visible extent is reproduced here. */
static SCM
weak_set_add_x (scm_t_weak_set *set, unsigned long hash,
                scm_t_set_predicate_fn pred, void *closure,
                SCM obj)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  /* Tag the hash so that a stored hash of 0 always means "empty slot". */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry. */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          /* FIX: "&copy" had been mangled into the copyright sign by an
             encoding error. */
          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found an entry with this key. */
            return SCM_PACK (copy.key);
        }

      if (set->n_items > set->upper)
        /* Full set, time to resize. */
        {
          resize_set (set);
          /* Recurse with the untagged hash. */
          return weak_set_add_x (set, hash >> 1, pred, closure, obj);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking. */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (set, k);
          break;
        }
    }
/* Associate KEY with VALUE in TABLE (robin-hood probing), replacing any
   existing entry matched by PRED.
   NOTE(review): the tail of this function — the code after the probe loop
   that actually stores the entry — is not visible in this chunk; only the
   visible extent is reproduced here. */
static void
weak_table_put_x (scm_t_weak_table *table, unsigned long hash,
                  scm_t_table_predicate_fn pred, void *closure,
                  SCM key, SCM value)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = table->size;
  entries = table->entries;

  /* Tag the hash so that a stored hash of 0 always means "empty slot". */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; ; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Found an empty entry. */
        break;

      if (other_hash == hash)
        {
          scm_t_weak_entry copy;

          /* FIX: "&copy" had been mangled into the copyright sign by an
             encoding error. */
          copy_weak_entry (&entries[k], &copy);

          if (!copy.key || !copy.value)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (table, k);
              table->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), SCM_PACK (copy.value), closure))
            /* Found an entry with this key. */
            break;
        }

      if (table->n_items > table->upper)
        /* Full table, time to resize. */
        {
          resize_table (table);
          /* Recurse with the untagged hash. */
          return weak_table_put_x (table, hash >> 1, pred, closure,
                                   key, value);
        }

      /* Displace the entry if our distance is less, otherwise keep
         looking. */
      if (entry_distance (other_hash, k, size) < distance)
        {
          rob_from_rich (table, k);
          break;
        }
    }
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
  // Return the cache entry for this protection domain, creating it on a miss.
  unsigned int hash = compute_hash(protection_domain);
  int slot = hash_to_index(hash);
  ProtectionDomainCacheEntry* result = find_entry(slot, protection_domain);
  if (result != NULL) {
    return result;
  }
  return add_entry(slot, hash, protection_domain);
}
/* Robin-hood probe distance: how far slot K lies from the slot this HASH
   would ideally occupy, accounting for wrap-around at SIZE. */
static unsigned long
entry_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long origin = hash_to_index (hash, size);

  if (k < origin)
    /* The other key was displaced and wrapped around. */
    return size - origin + k;

  return k - origin;
}
void remove(const Key &key) {
  // Erase the entry matching `key` from its bucket chain, if present.
  const auto h = hash_fun_(key);
  auto &chain = table_[hash_to_index(h)].data;
  for (auto node = chain.head_node(); node != nullptr; node = node->next) {
    const auto &e = *node->value;
    if (e->hash == h && *e->key == key) {
      chain.remove(node);
      return;
    }
  }
}
// Find the address entry with exactly this hash, or nullptr if absent.
udx_haddress* find_by_hash(udx_hash const & hash)
{
  u32 const i = hash_to_index(hash);
  udx_haddress* e = m_hashtable[i];
  while (e != nullptr)
  {
    // FIX: compare only the digest bytes.  The original passed
    // sizeof(hash) — the size of the whole udx_hash struct, including
    // m_len — so memcmp read past the m_hash buffer and compared
    // unrelated bytes.
    if (e->m_hash.m_len == hash.m_len &&
        memcmp(e->m_hash.m_hash, hash.m_hash, sizeof(hash.m_hash)) == 0)
      break;
    e = e->m_next;
  }
  return e;
}
void Dictionary::add_klass(Symbol* class_name, ClassLoaderData* loader_data,
                           KlassHandle obj) {
  // Insert a new dictionary entry for (class_name, loader_data) -> obj.
  assert_locked_or_safepoint(SystemDictionary_lock);
  assert(obj() != NULL, "adding NULL obj");
  assert(obj()->name() == class_name, "sanity check on name");
  assert(loader_data != NULL, "Must be non-NULL");

  unsigned int hash = compute_hash(class_name, loader_data);
  int slot = hash_to_index(hash);
  add_entry(slot, new_entry(hash, obj(), loader_data));
}
/* Search SET for an entry with this HASH satisfying PRED; return the
   stored key, or DFLT if no such entry is found. */
static SCM
weak_set_lookup (scm_t_weak_set *set, unsigned long hash,
                 scm_t_set_predicate_fn pred, void *closure,
                 SCM dflt)
{
  unsigned long k, distance, size;
  scm_t_weak_entry *entries;

  size = set->size;
  entries = set->entries;

  /* Tag the hash so that a stored hash of 0 always means "empty slot". */
  hash = (hash << 1) | 0x1;
  k = hash_to_index (hash, size);

  for (distance = 0; distance < size; distance++, k = (k + 1) % size)
    {
      unsigned long other_hash;

    retry:
      other_hash = entries[k].hash;

      if (!other_hash)
        /* Not found. */
        return dflt;

      if (hash == other_hash)
        {
          scm_t_weak_entry copy;

          /* FIX: "&copy" had been mangled into the copyright sign by an
             encoding error. */
          copy_weak_entry (&entries[k], &copy);

          if (!copy.key)
            /* Lost weak reference; reshuffle. */
            {
              give_to_poor (set, k);
              set->n_items--;
              goto retry;
            }

          if (pred (SCM_PACK (copy.key), closure))
            /* Found. */
            return SCM_PACK (copy.key);
        }

      /* If the entry's distance is less, our key is not in the set. */
      if (entry_distance (other_hash, k, size) < distance)
        return dflt;
    }

  /* If we got here, then we were unfortunate enough to loop through the
     whole set.  Shouldn't happen, but hey. */
  return dflt;
}
bool insert(const Key &key, const Value &value) { auto hash = hash_fun_(key); auto index = hash_to_index(hash); for (auto node = table_[index].data.head_node(); node; node = node->next) { if (((*node->value)->hash == hash) && (*(*node->value)->key == key)) { return false; } } std::shared_ptr<entry> e(new entry(key, value, hash)); table_[index].data.add_tail(e); return true; }
std::shared_ptr<const Value> find(const Key &key) const {
  // Return the value stored under `key`, or an empty pointer if absent.
  const auto h = hash_fun_(key);
  const auto &chain = table_[hash_to_index(h)].data;
  for (auto node = chain.head_node(); node != nullptr; node = node->next) {
    const auto &e = *node->value;
    if (e->hash == h && *e->key == key) {
      return e->value;
    }
  }
  return std::shared_ptr<const Value>();
}
// Insert `h` into its bucket, keeping the chain ordered by digest bytes.
void add(udx_haddress* h)
{
  u32 const i = hash_to_index(h->m_hash);
  udx_haddress** p = &m_hashtable[i];
  udx_haddress* e = m_hashtable[i];
  while (e != nullptr)
  {
    // FIX (length): compare only the digest bytes; the original passed
    // sizeof(udx_haddress::m_hash) — the size of the whole udx_hash
    // struct, including m_len — which overreads the m_hash buffer.
    s32 r = memcmp(e->m_hash.m_hash, h->m_hash.m_hash, sizeof(h->m_hash.m_hash));
    // FIX (sign): memcmp only guarantees the sign of its result, not the
    // value -1; the original `r == -1` test relied on unspecified
    // magnitude and could mis-order entries on some platforms.
    if (r < 0)
    {
      break;  // insert before the first smaller digest
    }
    p = &e->m_next;
    e = e->m_next;
  }
  *p = h;
  h->m_next = e;
}
// Unlink the entry whose digest matches `h` from its bucket chain.
// `h->m_next` is cleared regardless of whether a match was found.
void remove(udx_haddress* h)
{
  u32 const i = hash_to_index(h->m_hash);
  udx_haddress** p = &m_hashtable[i];
  udx_haddress* e = m_hashtable[i];
  while (e != nullptr)
  {
    // FIX: compare only the digest bytes; the original passed
    // sizeof(udx_haddress::m_hash) — the size of the whole udx_hash
    // struct, including m_len — which overreads the m_hash buffer and
    // could produce false negatives (entry never unlinked).
    s32 r = memcmp(e->m_hash.m_hash, h->m_hash.m_hash, sizeof(h->m_hash.m_hash));
    if (r == 0)
    {
      *p = e->m_next;  // unlink the match
      break;
    }
    p = &e->m_next;
    e = e->m_next;
  }
  h->m_next = nullptr;
}
void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
  // Unlink `to_delete` from its bucket chain and release its storage.
  unsigned int hash = compute_hash(to_delete->protection_domain());
  int index = hash_to_index(hash);
  ProtectionDomainCacheEntry** link = bucket_addr(index);
  for (ProtectionDomainCacheEntry* cur = bucket(index); ; cur = *link) {
    assert(cur != NULL, "sanity");  // the entry must be present in its bucket
    if (cur == to_delete) {
      *link = cur->next();
      Hashtable<oop, mtClass>::free_entry(cur);
      return;
    }
    link = cur->next_addr();
  }
}
/* Slot K was just freed up; shuffle subsequent displaced entries down so
   robin-hood probe invariants hold, then clear the vacated slot. */
static void
give_to_poor (scm_t_weak_table *table, unsigned long k)
{
  unsigned long size = table->size;

  while (1)
    {
      unsigned long next = (k + 1) % size;
      unsigned long hash;
      scm_t_weak_entry copy;

      hash = table->entries[next].hash;

      /* Stop at an empty slot or one already in its ideal position. */
      if (!hash || hash_to_index (hash, size) == next)
        break;

      /* FIX: "&copy" had been mangled into the copyright sign by an
         encoding error. */
      copy_weak_entry (&table->entries[next], &copy);

      if (!copy.key || !copy.value)
        /* Lost weak reference. */
        {
          give_to_poor (table, next);
          table->n_items--;
          continue;
        }

      move_weak_entry (&table->entries[next], &table->entries[k],
                       table->kind);

      k = next;
    }

  /* We have shuffled down any entries that should be shuffled down; now
     free the end. */
  table->entries[k].hash = 0;
  table->entries[k].key = 0;
  table->entries[k].value = 0;
}
typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, bool latin1, unsigned int hash) {
  // Find an existing deduplicated array equal to `value`, or install `value`
  // as a new entry; returns the existing array, or NULL if one was added.
  size_t index = hash_to_index(hash);
  G1StringDedupEntry** list = bucket(index);
  uintx probes = 0;

  // Lookup in list
  typeArrayOop found = lookup(value, latin1, hash, list, probes);

  // A long chain means the table is unbalanced; request a rehash.
  if (probes > _rehash_threshold) {
    _rehash_needed = true;
  }

  if (found == NULL) {
    // Not found, add new entry
    add(value, latin1, hash, list);
    // Update statistics
    _entries_added++;
  }

  return found;
}
// Record the constraint that `class_name` must resolve to the same class
// object in both `class_loader1` and `class_loader2`.  Returns true when
// the constraint was added or already holds; false on a conflict.
// Failure codes: 1 = the two presented classes differ; 2/3 = the class
// presented by loader[0]/loader[1] conflicts with a stored constraint.
bool LoaderConstraintTable::add_entry(symbolHandle class_name,
                                      klassOop klass1, Handle class_loader1,
                                      klassOop klass2, Handle class_loader2) {
  int failure_code = 0; // encode different reasons for failing

  if (klass1 != NULL && klass2 != NULL && klass1 != klass2) {
    failure_code = 1;
  } else {
    // At most one class object was presented; use it (may be NULL).
    klassOop klass = klass1 != NULL ? klass1 : klass2;

    // Check any existing constraint for (class_name, class_loader1).
    LoaderConstraintEntry** pp1 = find_loader_constraint(class_name,
                                                         class_loader1);
    if (*pp1 != NULL && (*pp1)->klass() != NULL) {
      if (klass != NULL) {
        if (klass != (*pp1)->klass()) {
          failure_code = 2;
        }
      } else {
        klass = (*pp1)->klass();
      }
    }

    // Check any existing constraint for (class_name, class_loader2).
    LoaderConstraintEntry** pp2 = find_loader_constraint(class_name,
                                                         class_loader2);
    if (*pp2 != NULL && (*pp2)->klass() != NULL) {
      if (klass != NULL) {
        if (klass != (*pp2)->klass()) {
          failure_code = 3;
        }
      } else {
        klass = (*pp2)->klass();
      }
    }

    if (failure_code == 0) {
      if (*pp1 == NULL && *pp2 == NULL) {
        // Neither loader has a constraint for this name yet: create one
        // covering both loaders and link it into its bucket.
        unsigned int hash = compute_hash(class_name);
        int index = hash_to_index(hash);
        LoaderConstraintEntry* p;
        p = new_entry(hash, class_name(), klass, 2, 2);
        p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2));
        p->set_loader(0, class_loader1());
        p->set_loader(1, class_loader2());
        p->set_klass(klass);
        p->set_next(bucket(index));
        set_entry(index, p);
        if (TraceLoaderConstraints) {
          ResourceMark rm;
          tty->print("[Adding new constraint for name: %s, loader[0]: %s,"
                     " loader[1]: %s ]\n",
                     class_name()->as_C_string(),
                     SystemDictionary::loader_name(class_loader1()),
                     SystemDictionary::loader_name(class_loader2())
                     );
        }
      } else if (*pp1 == *pp2) {
        /* constraint already imposed */
        if ((*pp1)->klass() == NULL) {
          // Fill in the class object now that one is known.
          (*pp1)->set_klass(klass);
          if (TraceLoaderConstraints) {
            ResourceMark rm;
            tty->print("[Setting class object in existing constraint for"
                       " name: %s and loader %s ]\n",
                       class_name()->as_C_string(),
                       SystemDictionary::loader_name(class_loader1())
                       );
          }
        } else {
          assert((*pp1)->klass() == klass, "loader constraints corrupted");
        }
      } else if (*pp1 == NULL) {
        // Only loader2 has a constraint; extend it to cover loader1.
        extend_loader_constraint(*pp2, class_loader1, klass);
      } else if (*pp2 == NULL) {
        // Only loader1 has a constraint; extend it to cover loader2.
        extend_loader_constraint(*pp1, class_loader2, klass);
      } else {
        // Both loaders have distinct constraints; merge them into one.
        merge_loader_constraints(pp1, pp2, klass);
      }
    }
  }

  if (failure_code != 0 && TraceLoaderConstraints) {
    // Report why the constraint could not be imposed.
    ResourceMark rm;
    const char* reason = "";
    switch(failure_code) {
    case 1: reason = "the class objects presented by loader[0] and loader[1]"
              " are different"; break;
    case 2: reason = "the class object presented by loader[0] does not match"
              " the stored class object in the constraint"; break;
    case 3: reason = "the class object presented by loader[1] does not match"
              " the stored class object in the constraint"; break;
    default: reason = "unknown reason code";
    }
    tty->print("[Failed to add constraint for name: %s, loader[0]: %s,"
               " loader[1]: %s, Reason: %s ]\n",
               class_name()->as_C_string(),
               SystemDictionary::loader_name(class_loader1()),
               SystemDictionary::loader_name(class_loader2()),
               reason
               );
  }

  return failure_code == 0;
}
/* Grow or shrink SET to the next acceptable size, re-inserting every live
   entry into the new entry array with robin-hood probing. */
static void
resize_set (scm_t_weak_set *set)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (set);
      if (new_size_index == set->size_index)
        return;
      new_size = hashset_size[new_size_index];
      new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
                                               "weak set");
    }
  while (!is_acceptable_size_index (set, new_size_index));

  old_entries = set->entries;
  old_size = set->size;

  memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));

  set->size_index = new_size_index;
  set->size = new_size;
  if (new_size_index <= set->min_size_index)
    set->lower = 0;
  else
    set->lower = new_size / 5;
  set->upper = 9 * new_size / 10;
  set->n_items = 0;
  set->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      /* FIX: "&copy" had been mangled into the copyright sign by an
         encoding error. */
      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key)
        /* Lost weak reference; nothing to reinsert. */
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (set, new_k);
              break;
            }
        }

      set->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;

      if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
        SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &new_entries[new_k].key,
                                          (void *) new_entries[new_k].key);
    }
}
// Bucket index for the entry keyed by `name`.
int index_for(Symbol* name) const {
  unsigned int hash = compute_hash(name);
  return hash_to_index(hash);
}
// Bucket index for the entry keyed by `name` under the given symbol mode.
int index_for(symbolHandle name, intptr_t symbol_mode) {
  unsigned int hash = compute_hash(name, symbol_mode);
  return hash_to_index(hash);
}
/* Grow or shrink TABLE to the next acceptable size, re-inserting every
   live entry into the new entry array with robin-hood probing.  Called
   with TABLE's lock held. */
static void
resize_table (scm_t_weak_table *table)
{
  scm_t_weak_entry *old_entries, *new_entries;
  int new_size_index;
  unsigned long old_size, new_size, old_k;

  do
    {
      new_size_index = compute_size_index (table);
      if (new_size_index == table->size_index)
        return;
      new_size = hashtable_size[new_size_index];
      scm_i_pthread_mutex_unlock (&table->lock);
      /* Allocating memory might cause finalizers to run, which could
         run anything, so drop our lock to avoid deadlocks.  */
      new_entries = allocate_entries (new_size, table->kind);
      /* FIX: the original called scm_i_pthread_mutex_unlock here a
         second time; the lock must be RE-ACQUIRED after allocation. */
      scm_i_pthread_mutex_lock (&table->lock);
    }
  while (!is_acceptable_size_index (table, new_size_index));

  old_entries = table->entries;
  old_size = table->size;

  table->size_index = new_size_index;
  table->size = new_size;
  if (new_size_index <= table->min_size_index)
    table->lower = 0;
  else
    table->lower = new_size / 5;
  table->upper = 9 * new_size / 10;
  table->n_items = 0;
  table->entries = new_entries;

  for (old_k = 0; old_k < old_size; old_k++)
    {
      scm_t_weak_entry copy;
      unsigned long new_k, distance;

      if (!old_entries[old_k].hash)
        continue;

      /* FIX: "&copy" had been mangled into the copyright sign by an
         encoding error. */
      copy_weak_entry (&old_entries[old_k], &copy);

      if (!copy.key || !copy.value)
        /* Lost weak reference; nothing to reinsert. */
        continue;

      new_k = hash_to_index (copy.hash, new_size);

      for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
        {
          unsigned long other_hash = new_entries[new_k].hash;

          if (!other_hash)
            /* Found an empty entry. */
            break;

          /* Displace the entry if our distance is less, otherwise keep
             looking. */
          if (entry_distance (other_hash, new_k, new_size) < distance)
            {
              rob_from_rich (table, new_k);
              break;
            }
        }

      table->n_items++;
      new_entries[new_k].hash = copy.hash;
      new_entries[new_k].key = copy.key;
      new_entries[new_k].value = copy.value;

      register_disappearing_links (&new_entries[new_k],
                                   SCM_PACK (copy.key),
                                   SCM_PACK (copy.value),
                                   table->kind);
    }
}