static void dump_bset(struct btree *b, struct bset *i)
{
	struct bkey *k;
	unsigned j;

	for (k = i->start; k < end(i); k = bkey_next(k)) {
		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
		       (uint64_t *) k - i->d, i->keys, pkey(k));

		for (j = 0; j < KEY_PTRS(k); j++) {
			size_t n = PTR_BUCKET_NR(b->c, k, j);

			printk(" bucket %zu", n);
			if (n >= b->c->sb.first_bucket &&
			    n < b->c->sb.nbuckets)
				printk(" prio %i",
				       PTR_BUCKET(b->c, k, j)->prio);
		}

		printk(" %s\n", bch_ptr_status(b->c, k));

		if (bkey_next(k) < end(i) &&
		    skipped_backwards(b, k))
			printk(KERN_ERR "Key skipped backwards\n");
	}
}
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	/* this iterator set is exhausted; replace it with the last set */
	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */
			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
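/*
 * A minimal standalone sketch (hypothetical names, not bcache's own API) of
 * the overlap arithmetic the loop above performs on half-open sector ranges:
 * an insert that lands in the middle of an existing extent splits it into a
 * bottom and a top piece (the bch_cut_back()/bch_cut_front() path), a full
 * overlap removes the old extent entirely, and a partial overlap trims one
 * end of it.
 */
struct sketch_extent {
	unsigned long long start;	/* first sector covered */
	unsigned long long end;		/* one past the last sector */
};

/*
 * Clip 'old' against the inserted range 'ins'.  Returns how many pieces of
 * 'old' survive (0, 1 or 2); on a split, 'old' becomes the bottom piece and
 * '*top' the piece above the insert.
 */
static int sketch_clip_extent(struct sketch_extent *old,
			      const struct sketch_extent *ins,
			      struct sketch_extent *top)
{
	if (ins->end <= old->start || ins->start >= old->end)
		return 1;			/* no overlap: untouched */

	if (ins->start > old->start && ins->end < old->end) {
		top->start = ins->end;		/* top piece above the insert */
		top->end = old->end;
		old->end = ins->start;		/* old becomes the bottom */
		return 2;
	}

	if (ins->start <= old->start && ins->end >= old->end)
		return 0;			/* completely overwritten */

	if (ins->start <= old->start)
		old->start = ins->end;		/* front of old is cut */
	else
		old->end = ins->start;		/* back of old is cut */
	return 1;
}

/*
 * Example: old = [0, 100), ins = [40, 60) returns 2 with old = [0, 40) and
 * top = [60, 100), matching the "overlapped in the middle" case above.
 */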
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
	return bkey_cmp(k, (!b->level)
			? &START_KEY(bkey_next(k))
			: bkey_next(k)) > 0;
}
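/*
 * A simplified sketch (hypothetical names) of the ordering rule that
 * skipped_backwards() checks: on a leaf node keys are extents, so the current
 * key must not reach past the *start* of the next one (START_KEY() is the
 * extent's offset minus its size); on an interior node the offsets themselves
 * must simply be nondecreasing.  Inode numbers, which bkey_cmp() compares
 * before offsets, are ignored here for brevity.
 */
struct sketch_key {
	unsigned long long offset;	/* end of the extent, as KEY_OFFSET() */
	unsigned long long size;	/* extent length, as KEY_SIZE() */
};

static int sketch_skipped_backwards(const struct sketch_key *k,
				    const struct sketch_key *next, int leaf)
{
	unsigned long long bound = leaf ? next->offset - next->size
					: next->offset;

	return k->offset > bound;
}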