/*
 * Read locks a block, and coerces it to an array block.  The caller must
 * unlock 'block' when finished.
 */
static int get_ablock(struct dm_array_info *info, dm_block_t b,
		      struct dm_block **block, struct array_block **ab)
{
	int r;

	r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
	if (r)
		return r;

	*ab = dm_block_data(*block);
	return 0;
}
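/*
 * Hypothetical caller sketch (not from the original source; the helper
 * name 'peek_ablock_nr_entries' is invented): shows the lock/unlock
 * pairing get_ablock() expects.  It assumes struct array_block carries a
 * little-endian nr_entries field in its header.
 */
static int peek_ablock_nr_entries(struct dm_array_info *info, dm_block_t b,
				  uint32_t *result)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;

	r = get_ablock(info, b, &block, &ab);
	if (r)
		return r;

	*result = le32_to_cpu(ab->nr_entries);

	/* The caller, not get_ablock(), is responsible for the unlock. */
	return dm_tm_unlock(info->btree_info.tm, block);
}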
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	uint32_t child_entries;
	struct btree_node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		/*
		 * The node has a single child: collapse the child into
		 * this node and free the now redundant child block.
		 */
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));

		/*
		 * Drop the child's reference while it is still locked;
		 * 'child' must not be dereferenced after the unlock.
		 */
		dm_tm_dec(info->tm, dm_block_location(child));

		return dm_tm_unlock(info->tm, child);
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
	if (r)
		return r;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, vt, i);
	else if (!has_right_sibling)
		r = rebalance2(s, info, vt, i - 1);
	else
		r = rebalance3(s, info, vt, i - 1);

	return r;
}
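/*
 * Illustrative note (added, inferred from how rebalance_children() picks
 * its arguments): rebalance2() appears to take the index of the left
 * child of the pair it redistributes, and rebalance3() the index of the
 * leftmost child of the triple.  So for a child i with both siblings,
 * passing i - 1 operates on the triple (i - 1, i, i + 1).
 */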
static int get_nr_entries(struct dm_transaction_manager *tm,
			  dm_block_t b, uint32_t *result)
{
	int r;
	struct dm_block *block;
	struct btree_node *n;

	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
	if (r)
		return r;

	n = dm_block_data(block);
	*result = le32_to_cpu(n->header.nr_entries);

	return dm_tm_unlock(tm, block);
}
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}
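/*
 * Sketch of the matching pop operation (assumed, not part of the quoted
 * code): each frame pushed by push_frame() holds a read lock, so popping
 * should decrement the block's reference count and release that lock.
 */
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	/* Dec while 'f->b' is still locked, then release it. */
	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}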
static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
			struct dm_block **result)
{
	return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
}
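/*
 * Hypothetical usage sketch (the helper name 'bn_node_flags' is invented):
 * bn_read_lock() pairs with dm_tm_unlock() once the caller has finished
 * with the node, mirroring the pattern in get_nr_entries() above.
 */
static int bn_node_flags(struct dm_btree_info *info, dm_block_t b,
			 uint32_t *flags)
{
	int r;
	struct dm_block *block;
	struct btree_node *n;

	r = bn_read_lock(info, b, &block);
	if (r)
		return r;

	n = dm_block_data(block);
	*flags = le32_to_cpu(n->header.flags);

	return dm_tm_unlock(info->tm, block);
}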