/**
 * Measure how well the hash function performs
 * (1.0 is perfect - no stepping needed).
 *
 * Smaller is better!
 */
double BLI_smallhash_calc_quality(SmallHash *sh)
{
	uint64_t step_total = 0;
	unsigned int i;

	if (sh->nentries == 0) {
		return -1.0;
	}

	for (i = 0; i < sh->nbuckets; i++) {
		SmallHashEntry *bucket = &sh->buckets[i];
		unsigned int h, hoff;
		SmallHashEntry *probe;

		if (bucket->key == SMHASH_KEY_UNUSED) {
			continue;
		}

		/* Replay the probe sequence for this bucket's key and count
		 * how many extra steps it takes to reach its final bucket. */
		h = smallhash_key(bucket->key);
		hoff = 1;
		probe = &sh->buckets[h % sh->nbuckets];

		while (probe != bucket) {
			h = SMHASH_NEXT(h, hoff);
			probe = &sh->buckets[h % sh->nbuckets];
			step_total += 1;
		}
	}

	/* One "hit" per entry plus every extra probe step, averaged per entry. */
	return (double)(sh->nentries + step_total) / (double)sh->nentries;
}
/**
 * Return the first bucket along \a key's probe sequence whose value is not
 * in use (per smallhash_val_is_used), i.e. the bucket a new entry for
 * \a key would occupy.
 *
 * Hashing goes through smallhash_key() so the probe sequence used for
 * placement matches the one replayed by BLI_smallhash_calc_quality(),
 * which also hashes with smallhash_key().
 */
BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uintptr_t key)
{
	SmallHashEntry *e;
	unsigned int h = smallhash_key(key);
	unsigned int hoff = 1;

	for (e = &sh->buckets[h % sh->nbuckets];
	     smallhash_val_is_used(e->val);
	     h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets])
	{
		/* pass */
	}

	return e;
}

/**
 * Return the bucket holding \a key, or NULL if the key is not present.
 *
 * Probing stops only at a SMHASH_CELL_FREE bucket: a free cell proves the
 * key was never inserted along this probe path.  Buckets whose value is
 * SMHASH_CELL_UNUSED do not terminate the probe, so the search steps past
 * them (presumably these mark removed entries — matches the tombstone
 * pattern of open addressing).
 *
 * Hashing goes through smallhash_key() to match insertion
 * (smallhash_lookup_first_free) and BLI_smallhash_calc_quality().
 */
BLI_INLINE SmallHashEntry *smallhash_lookup(SmallHash *sh, const uintptr_t key)
{
	SmallHashEntry *e;
	unsigned int h = smallhash_key(key);
	unsigned int hoff = 1;

	BLI_assert(key != SMHASH_KEY_UNUSED);

	/* note: there are always more buckets than entries,
	 * so we know there will always be a free bucket if the key isn't found. */
	for (e = &sh->buckets[h % sh->nbuckets];
	     e->val != SMHASH_CELL_FREE;
	     h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets])
	{
		if (e->key == key) {
			/* should never happen because unused keys are zero'd */
			BLI_assert(e->val != SMHASH_CELL_UNUSED);
			return e;
		}
	}

	return NULL;
}