/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also this only works for single level trees.
 */
/*
 * Walk the (sub)tree rooted at @block, invoking @fn on every leaf entry.
 *
 * @info:    btree metadata (holds the transaction manager used for locking)
 * @block:   root of the subtree to walk
 * @fn:      callback invoked with @context, a pointer to a copy of the
 *           entry's key, and a pointer to the leaf value; a non-zero
 *           return aborts the walk and is propagated to the caller
 * @context: opaque pointer passed through to @fn
 *
 * Returns 0 on success, or the first non-zero status from bn_read_lock()
 * or @fn.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	int internal;
	struct dm_block *node;
	struct btree_node *n;
	uint64_t keys;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	/*
	 * The node's flags cannot change while we hold the read lock, so
	 * hoist the internal-vs-leaf test out of the per-entry loop.
	 */
	internal = le32_to_cpu(n->header.flags) & INTERNAL_NODE;

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (internal) {
			/* Recurse into the child subtree. */
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			/* Leaf entry: hand key + value to the callback. */
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}
/*
 * Pop the top frame off the deletion stack: drop the reference taken on
 * its block and release the lock that was acquired when it was pushed.
 * Note the dec happens while the block is still locked.
 */
static void pop_frame(struct del_stack *s)
{
	struct frame *f = &s->spine[s->top];

	s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}
/*
 * Rebalance the children of the current shadow node after a removal.
 *
 * If the node has a single child it is collapsed: the child's contents are
 * copied over the parent and the child block is released.  Otherwise the
 * child covering @key is rebalanced with one or two of its siblings via
 * rebalance2()/rebalance3().
 *
 * Returns 0 on success, -ENODATA if @key is below the node's key range,
 * or an error from the block/transaction manager.
 */
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	uint32_t child_entries;
	struct btree_node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));

		/*
		 * Drop the reference *before* unlocking.  The original code
		 * called dm_block_location(child) after dm_tm_unlock(),
		 * which touches the block after it may have been recycled —
		 * a use-after-free.
		 */
		dm_tm_dec(info->tm, dm_block_location(child));
		return dm_tm_unlock(info->tm, child);
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
	if (r)
		return r;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, vt, i);
	else if (!has_right_sibling)
		r = rebalance2(s, info, vt, i - 1);
	else
		r = rebalance3(s, info, vt, i - 1);

	return r;
}
/*
 * Read the btree node at @b and report its entry count through @result.
 * Returns 0 on success, or an error from locking/unlocking the block.
 */
static int get_nr_entries(struct dm_transaction_manager *tm,
			  dm_block_t b, uint32_t *result)
{
	int r;
	struct dm_block *blk;
	struct btree_node *node;

	r = dm_tm_read_lock(tm, b, &btree_node_validator, &blk);
	if (r)
		return r;

	node = dm_block_data(blk);
	*result = le32_to_cpu(node->header.nr_entries);

	return dm_tm_unlock(tm, blk);
}
/*
 * Release the lock held on block @b via the btree's transaction manager.
 * Returns the transaction manager's status code.
 */
int unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
	struct dm_transaction_manager *tm = info->tm;

	return dm_tm_unlock(tm, b);
}
/*
 * Unlocks an array block through the embedded btree's transaction
 * manager.  Returns the transaction manager's status code.
 */
static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
	struct dm_transaction_manager *tm = info->btree_info.tm;

	return dm_tm_unlock(tm, block);
}
/*
 * Tear down a child wrapper by releasing the lock on its block.
 * Returns the transaction manager's status code.
 */
static int exit_child(struct dm_btree_info *info, struct child *c)
{
	struct dm_block *b = c->block;

	return dm_tm_unlock(info->tm, b);
}