/**
 * Internal insert function. Only wraps #ghash_insert_ex.
 */
BLI_INLINE void ghash_insert(GHash *gh, void *key, void *val)
{
  const unsigned int hash = ghash_keyhash(gh, key);
  const unsigned int bucket_index = ghash_bucket_index(gh, hash);
  ghash_insert_ex(gh, key, val, bucket_index);
}
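#if 0
/* For context: a minimal sketch (disabled) of how the public API presumably wraps
 * the internal helper above. #BLI_ghash_insert is a real BLI_ghash entry point,
 * but this body is an illustrative assumption, not the verbatim implementation. */
void BLI_ghash_insert(GHash *gh, void *key, void *val)
{
  ghash_insert(gh, key, val);
}
#endif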
/**
 * Internal lookup function. Only wraps #ghash_lookup_entry_ex.
 */
BLI_INLINE Entry *ghash_lookup_entry(GHash *gh, const void *key)
{
  const unsigned int hash = ghash_keyhash(gh, key);
  const unsigned int bucket_index = ghash_bucket_index(gh, hash);
  return ghash_lookup_entry_ex(gh, key, bucket_index);
}
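#if 0
/* A sketch (disabled) of a public lookup built on #ghash_lookup_entry: return the
 * stored value, or NULL when the key is absent. #BLI_ghash_lookup is part of the
 * real API; this body is illustrative only. */
void *BLI_ghash_lookup(GHash *gh, const void *key)
{
  GHashEntry *e = (GHashEntry *)ghash_lookup_entry(gh, key);
  BLI_assert(!(gh->flag & GHASH_FLAG_IS_GSET));
  return e ? e->val : NULL;
}
#endif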
/**
 * Internal insert-or-replace function.
 * Returns true if a new entry was added, false if an existing one was found
 * (and optionally overridden).
 */
BLI_INLINE bool ghash_insert_safe(GHash *gh,
                                  void *key,
                                  void *val,
                                  const bool override,
                                  GHashKeyFreeFP keyfreefp,
                                  GHashValFreeFP valfreefp)
{
  const unsigned int hash = ghash_keyhash(gh, key);
  const unsigned int bucket_index = ghash_bucket_index(gh, hash);
  GHashEntry *e = (GHashEntry *)ghash_lookup_entry_ex(gh, key, bucket_index);

  BLI_assert(!(gh->flag & GHASH_FLAG_IS_GSET));

  if (e) {
    if (override) {
      if (keyfreefp) {
        keyfreefp(e->e.key);
      }
      if (valfreefp) {
        valfreefp(e->val);
      }
      e->e.key = key;
      e->val = val;
    }
    return false;
  }
  ghash_insert_ex(gh, key, val, bucket_index);
  return true;
}
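#if 0
/* A sketch (disabled) of how a public "replace or insert" entry point presumably
 * maps onto #ghash_insert_safe: override an existing entry, freeing the old
 * key/value through the given callbacks. #BLI_ghash_reinsert exists in the real
 * API; the body here is an assumption. */
bool BLI_ghash_reinsert(
    GHash *gh, void *key, void *val, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
{
  return ghash_insert_safe(gh, key, val, true, keyfreefp, valfreefp);
}
#endif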
/**
 * Expand buckets to the next size up or down.
 */
static void ghash_buckets_resize(GHash *gh, const unsigned int nbuckets)
{
  Entry **buckets_old = gh->buckets;
  Entry **buckets_new;
  const unsigned int nbuckets_old = gh->nbuckets;
  unsigned int i;

  BLI_assert((gh->nbuckets != nbuckets) || !gh->buckets);
  //  printf("%s: %d -> %d\n", __func__, nbuckets_old, nbuckets);

  gh->nbuckets = nbuckets;
#ifdef GHASH_USE_MODULO_BUCKETS
  /* Modulo buckets: nothing extra to update, the index is derived from nbuckets. */
#else
  gh->bucket_mask = nbuckets - 1;
#endif

  buckets_new = (Entry **)MEM_callocN(sizeof(*gh->buckets) * gh->nbuckets, __func__);

  if (buckets_old) {
    if (nbuckets > nbuckets_old) {
      /* Growing: every entry has to be rehashed into its new bucket. */
      for (i = 0; i < nbuckets_old; i++) {
        for (Entry *e = buckets_old[i], *e_next; e; e = e_next) {
          const unsigned int hash = ghash_entryhash(gh, e);
          const unsigned int bucket_index = ghash_bucket_index(gh, hash);
          e_next = e->next;
          e->next = buckets_new[bucket_index];
          buckets_new[bucket_index] = e;
        }
      }
    }
    else {
      for (i = 0; i < nbuckets_old; i++) {
#ifdef GHASH_USE_MODULO_BUCKETS
        for (Entry *e = buckets_old[i], *e_next; e; e = e_next) {
          const unsigned int hash = ghash_entryhash(gh, e);
          const unsigned int bucket_index = ghash_bucket_index(gh, hash);
          e_next = e->next;
          e->next = buckets_new[bucket_index];
          buckets_new[bucket_index] = e;
        }
#else
        /* No need to recompute hashes in this case, since our mask is just smaller:
         * all items in old bucket i will go into the same new bucket (i & new_mask)! */
        const unsigned int bucket_index = ghash_bucket_index(gh, i);
        BLI_assert(!buckets_old[i] ||
                   (bucket_index == ghash_bucket_index(gh, ghash_entryhash(gh, buckets_old[i]))));
        Entry *e;
        for (e = buckets_old[i]; e && e->next; e = e->next) {
          /* Walk to the last entry of the old chain... */
        }
        if (e) {
          /* ...and prepend the whole old chain to the new bucket. */
          e->next = buckets_new[bucket_index];
          buckets_new[bucket_index] = buckets_old[i];
        }
#endif
      }
    }
  }

  gh->buckets = buckets_new;
  if (buckets_old) {
    MEM_freeN(buckets_old);
  }
}
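#if 0
/* For reference: a sketch (disabled) of the bucket-index computation the resize
 * loops above rely on. With GHASH_USE_MODULO_BUCKETS the bucket count is indexed
 * by modulo; otherwise it is a power of two and a simple mask suffices (which is
 * why gh->bucket_mask is updated above). This is an assumption about how
 * #ghash_bucket_index is defined, not a verbatim copy. */
BLI_INLINE unsigned int ghash_bucket_index(GHash *gh, const unsigned int hash)
{
#  ifdef GHASH_USE_MODULO_BUCKETS
  return hash % gh->nbuckets;
#  else
  return hash & gh->bucket_mask;
#  endif
}
#endif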