/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA.
		 */
		root = value64(n, i);
	}

	return r;
}
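/*
 * The comment above says the caller must finish the removal with
 * delete_at() on the returned index.  The sketch below is a minimal,
 * standalone userspace illustration of that final step: removing entry
 * `index` from parallel key/value arrays by shifting the tail down.
 * toy_delete_at() is an illustrative name; it is not the kernel's
 * delete_at(), which operates on the on-disk btree_node layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void toy_delete_at(uint64_t *keys, uint64_t *values,
			  unsigned *nr_entries, unsigned index)
{
	unsigned nr_to_copy = *nr_entries - index - 1;

	if (nr_to_copy) {
		/* close the gap left by the deleted entry */
		memmove(keys + index, keys + index + 1,
			nr_to_copy * sizeof(*keys));
		memmove(values + index, values + index + 1,
			nr_to_copy * sizeof(*values));
	}

	(*nr_entries)--;
}

int main(void)
{
	uint64_t keys[] = {10, 20, 30, 40};
	uint64_t values[] = {100, 200, 300, 400};
	unsigned i, nr = 4;

	/* remove the entry for key 20 (index 1) */
	toy_delete_at(keys, values, &nr, 1);

	for (i = 0; i < nr; i++)
		printf("%llu -> %llu\n",
		       (unsigned long long)keys[i],
		       (unsigned long long)values[i]);
	return 0;
}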
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, root, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}
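/*
 * btree_insert_raw() above relies on lower_bound() returning the index of
 * the greatest key that is <= the search key, or a negative value when the
 * key sorts below every entry in the node.  That convention is why the loop
 * checks `i < 0` before descending and why `i` is bumped at the end when
 * the leaf does not already contain the key.  The sketch below is a
 * standalone userspace approximation of that convention over a plain sorted
 * array; toy_lower_bound() is an illustrative name, not the kernel function.
 */
#include <stdint.h>
#include <stdio.h>

static int toy_lower_bound(const uint64_t *keys, unsigned nr_entries,
			   uint64_t key)
{
	int lo = -1, hi = nr_entries;

	/* binary search for the greatest key <= the search key */
	while (hi - lo > 1) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] == key)
			return mid;

		if (keys[mid] < key)
			lo = mid;
		else
			hi = mid;
	}

	return lo;	/* -1 when key sorts below every entry */
}

int main(void)
{
	uint64_t keys[] = {10, 20, 30};

	printf("%d\n", toy_lower_bound(keys, 3, 5));	/* -1: below all keys	  */
	printf("%d\n", toy_lower_bound(keys, 3, 20));	/*  1: exact match	  */
	printf("%d\n", toy_lower_bound(keys, 3, 25));	/*  1: greatest key <= 25 */
	printf("%d\n", toy_lower_bound(keys, 3, 99));	/*  2: last entry	  */
	return 0;
}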
/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *        +--------+
 *        | Parent |
 *        +--------+
 *           |
 *           v
 *      +----------+
 *      | A ++++++ |
 *      +----------+
 *
 *
 * After:
 *              +--------+
 *              | Parent |
 *              +--------+
 *                |    |
 *                v    +------+
 *          +---------+       |
 *          | A* +++  |       v
 *          +---------+ +-------+
 *                      | B +++ |
 *                      +-------+
 *
 * Where A* is a shadow of A.
 */
static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
			       unsigned parent_index, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		/* don't leak the freshly allocated, still-locked sibling */
		unlock_block(s->info, right);
		return r;
	}

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
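/*
 * A standalone userspace sketch of the half-split arithmetic used in
 * btree_split_sibling(): the left node keeps the lower half of the
 * entries, the new sibling takes the rest, and the sibling's first key
 * becomes the separator inserted into the parent.  toy_split() and
 * TOY_MAX are illustrative names only; this is a plain-array sketch,
 * not the on-disk node code above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX 8

static void toy_split(uint64_t *left_keys, unsigned *nr_entries,
		      uint64_t *right_keys, unsigned *nr_right_out)
{
	/* same arithmetic as btree_split_sibling() */
	unsigned nr_left = *nr_entries / 2;
	unsigned nr_right = *nr_entries - nr_left;

	memcpy(right_keys, left_keys + nr_left, nr_right * sizeof(*right_keys));
	*nr_entries = nr_left;
	*nr_right_out = nr_right;
}

int main(void)
{
	uint64_t left[TOY_MAX] = {10, 20, 30, 40, 50, 60, 70};
	uint64_t right[TOY_MAX];
	unsigned nr = 7, nr_right;

	toy_split(left, &nr, right, &nr_right);

	/*
	 * left now holds {10, 20, 30}, right holds {40, 50, 60, 70};
	 * right[0] (40) is the separating key handed to the parent.
	 */
	printf("left=%u right=%u sep=%llu\n", nr, nr_right,
	       (unsigned long long)right[0]);
	return 0;
}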