/* Return the usable length of the free record at @off, or a negative
 * TDB_ERROR value (read failure, or TDB_ERR_CORRUPT if the record does
 * not carry the free-record magic). */
static tdb_len_t free_record_length(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_free_record rec;
	enum TDB_ERROR ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));

	if (ecode != TDB_SUCCESS)
		return ecode;

	/* A record without the free magic means the free list is corrupt. */
	if (frec_magic(&rec) != TDB_FREE_MAGIC)
		return TDB_ERR_CORRUPT;

	return frec_len(&rec);
}
static bool empty_freetable(struct tdb_context *tdb) { struct tdb_freetable free; unsigned int i; /* Now, free table should be completely exhausted in zone 0 */ if (tdb_read_convert(tdb, tdb->ftable_off, &free, sizeof(free)) != 0) abort(); for (i = 0; i < sizeof(free.buckets)/sizeof(free.buckets[0]); i++) { if (free.buckets[i]) return false; } return true; }
/* Does entry match?
 *
 * @val is the raw hash-group entry; @h carries the full hash and how many
 * bits of it have been consumed so far.  Returns true on a key match,
 * false on any mismatch, or a negative TDB_ERROR if reading the record
 * fails.  On the true/record-read paths, *rec holds the record header.
 * Each rejection path bumps a distinct stats counter. */
static tdb_bool_err match(struct tdb_context *tdb,
			  struct hash_info *h,
			  const struct tdb_data *key,
			  tdb_off_t val,
			  struct tdb_used_record *rec)
{
	tdb_off_t off;
	enum TDB_ERROR ecode;

	tdb->stats.compares++;
	/* Desired bucket must match. */
	if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
		tdb->stats.compare_wrong_bucket++;
		return false;
	}

	/* Top bits of offset == next bits of hash.  The entry steals some
	 * upper offset bits to cache extra hash bits; compare them against
	 * the corresponding unconsumed bits of the full 64-bit hash. */
	if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
	    != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
			 TDB_OFF_UPPER_STEAL_EXTRA)) {
		tdb->stats.compare_wrong_offsetbits++;
		return false;
	}

	/* Mask off the stolen bits to get the real record offset. */
	off = val & TDB_OFF_MASK;
	ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* Record header stores the low 11 bits of the hash (rec_hash);
	 * cheap final filter before the full key comparison.
	 * NOTE(review): 11 presumably matches the header's hash-bit field
	 * width — confirm against the record layout definition. */
	if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
		tdb->stats.compare_wrong_rechash++;
		return false;
	}

	/* All filters passed: compare the actual key bytes. */
	return key_matches(tdb, rec, off, key);
}
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and -ve is returned.
 * Otherwise, hinfo is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record.
 *
 * Note: on the found and not-found returns the hash lock taken below is
 * still held — the caller is responsible for releasing it. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
			struct tdb_data key,
			int ltype,
			struct hash_info *h,
			struct tdb_used_record *rec,
			struct traverse_info *tinfo)
{
	uint32_t i, group;
	tdb_off_t hashtable;
	enum TDB_ERROR ecode;

	/* Consume top-level group bits, then the in-group bucket bits,
	 * from the 64-bit hash. */
	h->h = tdb_hash(tdb, key.dptr, key.dsize);
	h->hash_used = 0;
	group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
	h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

	/* Lock the hash range covering this group before reading. */
	h->hlock_start = hlock_range(group, &h->hlock_range);
	ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
				TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* Start at the top-level hash table embedded in the header. */
	hashtable = offsetof(struct tdb_header, hashtable);
	if (tinfo) {
		tinfo->toplevel_group = group;
		tinfo->num_levels = 1;
		tinfo->levels[0].entry = 0;
		tinfo->levels[0].hashtable = hashtable
			+ (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
		tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
	}

	/* Descend through sub-hash levels until the hash bits run out. */
	while (h->hash_used <= 64) {
		/* Read in the hash group. */
		h->group_start = hashtable
			+ group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

		ecode = tdb_read_convert(tdb, h->group_start, &h->group,
					 sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			goto fail;
		}

		/* Pointer to another hash table? Go down... */
		if (is_subhash(h->group[h->home_bucket])) {
			/* Sub-table lives just past its record header. */
			hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
				+ sizeof(struct tdb_used_record);
			if (tinfo) {
				/* When we come back, use *next* bucket */
				tinfo->levels[tinfo->num_levels-1].entry
					+= h->home_bucket + 1;
			}
			/* Consume the next slice of hash bits for the
			 * sub-level group and bucket. */
			group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
					 - TDB_HASH_GROUP_BITS);
			h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
			if (tinfo) {
				tinfo->levels[tinfo->num_levels].hashtable
					= hashtable;
				tinfo->levels[tinfo->num_levels].total_buckets
					= 1 << TDB_SUBLEVEL_HASH_BITS;
				tinfo->levels[tinfo->num_levels].entry
					= group << TDB_HASH_GROUP_BITS;
				tinfo->num_levels++;
			}
			continue;
		}

		/* It's in this group: search (until 0 or all searched) */
		for (i = 0, h->found_bucket = h->home_bucket;
		     i < (1 << TDB_HASH_GROUP_BITS);
		     i++, h->found_bucket = ((h->found_bucket+1)
					     % (1 << TDB_HASH_GROUP_BITS))) {
			tdb_bool_err berr;
			/* Sub-hash pointers can't be our entry; skip. */
			if (is_subhash(h->group[h->found_bucket]))
				continue;

			/* Empty bucket terminates the linear probe. */
			if (!h->group[h->found_bucket])
				break;

			berr = match(tdb, h, &key, h->group[h->found_bucket],
				     rec);
			if (berr < 0) {
				ecode = berr;
				goto fail;
			}
			if (berr) {
				if (tinfo) {
					tinfo->levels[tinfo->num_levels-1].entry
						+= h->found_bucket;
				}
				/* Found: return the record offset. */
				return h->group[h->found_bucket]
					& TDB_OFF_MASK;
			}
		}
		/* Didn't find it: h indicates where it would go. */
		return 0;
	}

	/* Hash bits exhausted: fall back to the overflow chain. */
	return find_in_chain(tdb, key, hashtable, h, rec, tinfo);

fail:
	tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
	return ecode;
}
/* Cold-path search of the overflow chain used once all 64 hash bits have
 * been consumed.  Walks each chain link's hash group comparing full keys
 * (no hash bits remain to filter on).  Returns the matching record offset,
 * 0 if not found, or a negative TDB_ERROR.  On return h->home_bucket /
 * h->found_bucket point at the match, or at the last empty bucket seen
 * (so a subsequent insert can reuse it), or 0 if none was free. */
static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
				    struct tdb_data key,
				    tdb_off_t chain,
				    struct hash_info *h,
				    struct tdb_used_record *rec,
				    struct traverse_info *tinfo)
{
	tdb_off_t off, next;
	enum TDB_ERROR ecode;

	/* In case nothing is free, we set these to zero. */
	h->home_bucket = h->found_bucket = 0;

	for (off = chain; off; off = next) {
		unsigned int i;

		h->group_start = off;
		ecode = tdb_read_convert(tdb, off, h->group,
					 sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}

		for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
			tdb_off_t recoff;
			if (!h->group[i]) {
				/* Remember this empty bucket. */
				h->home_bucket = h->found_bucket = i;
				continue;
			}

			/* We can insert extra bits via add_to_hash
			 * empty bucket logic. */
			recoff = h->group[i] & TDB_OFF_MASK;
			ecode = tdb_read_convert(tdb, recoff, rec,
						 sizeof(*rec));
			if (ecode != TDB_SUCCESS) {
				return ecode;
			}

			/* key_matches is a tdb_bool_err: 1 match,
			 * 0 mismatch, negative TDB_ERROR. */
			ecode = key_matches(tdb, rec, recoff, &key);
			if (ecode < 0) {
				return ecode;
			}
			if (ecode == 1) {
				h->home_bucket = h->found_bucket = i;

				if (tinfo) {
					/* Record this chain link as a new
					 * traversal level. */
					tinfo->levels[tinfo->num_levels]
						.hashtable = off;
					tinfo->levels[tinfo->num_levels]
						.total_buckets
						= 1 << TDB_HASH_GROUP_BITS;
					tinfo->levels[tinfo->num_levels].entry
						= i;
					tinfo->num_levels++;
				}
				return recoff;
			}
		}

		/* Advance to the next chain link, if any. */
		next = tdb_read_off(tdb, off
				    + offsetof(struct tdb_chain, next));
		if (TDB_OFF_IS_ERR(next)) {
			return next;
		}
		/* Next link's group also sits past its record header. */
		if (next)
			next += sizeof(struct tdb_used_record);
	}
	return 0;
}