void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
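/*
 * Illustration (not part of the driver): a minimal userspace sketch of
 * the p() accumulation pattern above. scnprintf() returns the number of
 * bytes actually stored, never more than the space left, so repeated
 * "out += scnprintf(out, end - out, ...)" steps cannot walk past the
 * buffer. Plain snprintf() returns the would-be length instead, which
 * is why the kernel helper exists; the wrapper below is a hypothetical
 * stand-in for it.
 */
#if 0	/* standalone example, not compiled into the driver */
#include <stdarg.h>
#include <stdio.h>

static size_t ex_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!size)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	/* Clamp to what was actually stored, as scnprintf() does. */
	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
	char buf[32];
	char *out = buf, *end = buf + sizeof(buf);

	out += ex_scnprintf(out, end - out, "%llu:%llu", 1ULL, 4096ULL);
	out += ex_scnprintf(out, end - out, " len %llu", 8ULL);
	printf("%s\n", buf);	/* prints "1:4096 len 8" */
	return 0;
}
#endif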
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
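/*
 * Illustration (not part of the driver): both expensive checks above use
 * mutex_trylock() rather than mutex_lock(). A debug-only consistency
 * check must never block or deadlock against a path that already holds
 * bucket_lock, so if the lock is contended the check is simply skipped.
 * A hypothetical userspace analogue with pthreads (note the inverted
 * return convention: pthread_mutex_trylock() returns 0 on success, while
 * the kernel's mutex_trylock() returns 1):
 */
#if 0	/* standalone example, not compiled into the driver */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static bool invariants_hold(void)
{
	return true;	/* placeholder for the real checks */
}

static bool opportunistic_check(void)
{
	bool bad = false;

	if (pthread_mutex_trylock(&bucket_lock) == 0) {
		bad = !invariants_hold();
		pthread_mutex_unlock(&bucket_lock);
	}
	/* Lock contended: report nothing rather than risk blocking. */
	return bad;
}
#endif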
struct keyprint_hack bch_pkey(const struct bkey *k)
{
	unsigned i = 0;
	struct keyprint_hack r;
	char *out = r.s, *end = r.s + KEYHACK_SIZE;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

	if (KEY_PTRS(k))
		while (1) {
			p("%llu:%llu gen %llu",
			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

			if (++i == KEY_PTRS(k))
				break;

			p(", ");
		}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
	return r;
}
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	struct bucket *g;
	unsigned i, stale;

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
		return false;

	for (i = 0; i < KEY_PTRS(k); i++) {
		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}
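/*
 * Illustration (not part of the driver): ptr_stale() boils down to
 * comparing the 8-bit generation embedded in the pointer against the
 * bucket's current generation; the bucket gen is bumped each time the
 * bucket is reused, so a nonzero distance means the pointer predates the
 * bucket's current contents. A sketch of the wrapping comparison (the
 * in-tree helper is gen_after() in bcache.h; this is an approximation
 * for illustration):
 */
#if 0	/* standalone example, not compiled into the driver */
#include <stdint.h>

static uint8_t ex_gen_after(uint8_t bucket_gen, uint8_t ptr_gen)
{
	uint8_t r = bucket_gen - ptr_gen;	/* wraps mod 256 */

	/* Distances past 128 mean the ptr is ahead, i.e. not stale. */
	return r > 128 ? 0 : r;
}
#endif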
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */
			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}

out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
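/*
 * Illustration (not part of the driver): bcache extents are half-open
 * sector ranges [KEY_START(k), KEY_OFFSET(k)), where KEY_OFFSET() is the
 * *end* of the range and KEY_START() = KEY_OFFSET() - KEY_SIZE(). The
 * loop above distinguishes four kinds of overlap between the key being
 * inserted and an existing key; a standalone sketch of that case
 * analysis, under those assumptions:
 */
#if 0	/* standalone example, not compiled into the driver */
#include <stdint.h>
#include <stdio.h>

struct ext {
	uint64_t start, end;	/* half-open [start, end) */
};

static const char *classify(struct ext ins, struct ext old)
{
	if (ins.end <= old.start || old.end <= ins.start)
		return "no overlap";
	if (ins.start <= old.start && ins.end >= old.end)
		return "old key completely overwritten";
	if (ins.start > old.start && ins.end < old.end)
		return "middle overlap: old key split in two";
	if (ins.start <= old.start)
		return "front of old key cut (bch_cut_front)";
	return "back of old key cut (bch_cut_back)";
}

int main(void)
{
	struct ext old = { 10, 30 };
	struct ext cases[] = {
		{ 0, 5 }, { 5, 15 }, { 15, 25 }, { 25, 40 }, { 0, 40 },
	};
	unsigned i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("[%llu,%llu): %s\n",
		       (unsigned long long)cases[i].start,
		       (unsigned long long)cases[i].end,
		       classify(cases[i], old));
	return 0;
}
#endif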