static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}
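/*
 * Editorial sketch of the tie-break above (the bset layout described here is
 * an assumption about the surrounding code, not text from it): within a
 * btree node, bsets are laid out in memory oldest first, so a lower key
 * pointer means an older key. If two bsets contain bit-identical keys,
 * bkey_cmp() returns 0 and the comparison falls through to l.k < r.k, which
 * ranks the older (lower-address) copy as "greater". The heap therefore
 * yields the newer copy first, which is what btree_sort_fixup() relies on to
 * drop stale duplicates.
 */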
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}
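/*
 * An arithmetic sketch of the "length too big" check above (the numbers are
 * made up for illustration): with a 1024-sector bucket, a pointer that lands
 * r = 1000 sectors into its bucket can address at most 24 more sectors. A
 * 100-sector key would give KEY_SIZE(k) + r = 1100 > 1024, i.e. the extent
 * would run off the end of its bucket, so the key is flagged bad.
 */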
/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}
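/*
 * A worked example for the comparison above (illustrative values, not from
 * the original source): l.k = KEY(1, 20, 10) and r.k = KEY(1, 15, 5) are
 * extents covering sectors 10..20 and 10..15 of inode 1. They end at
 * different offsets, but START_KEY() is 10 for both, so c == 0 and the tie
 * breaks on age: the copy from the newer bset pops first, letting
 * bch_extent_sort_fixup() trim the older, overlapping extent.
 */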
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		/* i should point at the smaller of the root's two children */
		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		/* No overlap: top ends at or before i starts */
		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			/* top is newer: drop or trim the older key i */
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				/*
				 * i is newer and lands in the middle of top:
				 * split top, emit the front fragment now via
				 * tmp and leave the back fragment in the heap
				 */
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				/* i is newer and covers the rest of top */
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
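/*
 * A worked split example (illustrative values, not from the original
 * source): top = KEY(1, 100, 100) from an older bset covers sectors 0..100,
 * i = KEY(1, 60, 20) from a newer bset covers 40..60. top pops first (its
 * start is smaller), but i is newer, so the overlapping middle belongs to i:
 *
 *	tmp	<- copy of top, cut back to 40	(covers  0..40, emitted now)
 *	top	<- cut front to 60		(covers 60..100, stays heaped)
 *
 * The iterator then yields tmp (0..40), i (40..60) and finally the trimmed
 * top (60..100), i.e. a sorted, non-overlapping stream.
 */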
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}
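/*
 * Editorial note on the stale check above (a sketch of the invariant, not
 * text from the original source): each pointer embeds the generation number
 * its bucket had when the pointer was created. ptr_stale() compares that
 * against the bucket's current gen; a mismatch means the bucket has since
 * been invalidated and reused, so a btree node pointer through it must be
 * treated as bad even though the offset itself still looks plausible.
 */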
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */
			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
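/*
 * A worked example of the middle-overlap split above (illustrative values,
 * not from the original source): an existing extent k = KEY(1, 100, 100)
 * covers sectors 0..100 and insert = KEY(1, 60, 20) covers 40..60. Since
 * insert starts after k does and ends inside it, k must become two keys:
 *
 *	bottom:	cut back at 40	-> covers  0..40  (k, modified in place)
 *	top:	cut front at 60	-> covers 60..100 (freshly inserted copy)
 *
 * with insert (40..60) landing between them. Whether the new copy is placed
 * via bch_bset_search() or next to k depends only on whether k has already
 * been written out, as the comments in the function explain.
 */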
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
	/*
	 * Detects an ordering violation between k and the key after it: on
	 * leaf nodes keys are extents, so k must end by the time the next
	 * extent starts; on interior nodes it is enough that k does not
	 * sort after bkey_next(k).
	 */
	return bkey_cmp(k, (!b->level)
			? &START_KEY(bkey_next(k))
			: bkey_next(k)) > 0;
}
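/*
 * A numeric sketch of the leaf-node case (illustrative values, not from the
 * original source): k = KEY(1, 50, 10) covers sectors 40..50 and
 * bkey_next(k) = KEY(1, 45, 5) covers 40..45. START_KEY() of the next key
 * is 40, so bkey_cmp(k, &START_KEY(next)) compares offset 50 against 40,
 * i.e. > 0, and the pair is flagged: the first extent runs past the start
 * of the second, which a correctly sorted, non-overlapping leaf never
 * allows.
 */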