/*
 * Allocate a new block, initialize it as an empty leaf via the btree's
 * leaf_init method, and mark it dirty.  Returns the buffer on success
 * or an ERR_PTR-encoded errno from new_block() on failure.
 */
struct buffer_head *new_leaf(struct btree *btree)
{
	struct buffer_head *buffer = new_block(btree);
	if (!IS_ERR(buffer)) {
		memset(bufdata(buffer), 0, bufsize(buffer));
		(btree->ops->leaf_init)(btree, bufdata(buffer));
		mark_buffer_dirty_atomic(buffer);
	}
	return buffer;
}
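/*
 * Aside: a minimal, self-contained sketch (not tux3 code; all demo_*
 * names are hypothetical) of the kernel-style ERR_PTR convention that
 * new_leaf() relies on.  new_block() returns either a valid buffer or
 * a negative errno encoded in the pointer value, so a single return
 * value carries both outcomes and the caller tests it with IS_ERR().
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static inline void *demo_err_ptr(long error)
{
	return (void *)error;		/* encode -errno in a pointer */
}

static inline int demo_is_err(const void *ptr)
{
	/* The top 4095 addresses are reserved for error codes, as in the kernel */
	return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *demo_alloc_block(size_t size)
{
	void *block = malloc(size);
	return block ? block : demo_err_ptr(-ENOMEM);
}

/* Caller pattern mirroring new_leaf(): initialize only on success */
static void *demo_new_leaf(size_t size)
{
	void *leaf = demo_alloc_block(size);
	if (!demo_is_err(leaf))
		memset(leaf, 0, size);
	return leaf;
}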
/*
 * Redirect the non-dirty buffers on the cursor path so that the leaf
 * can be modified.
 *
 * Redirect order is from root to leaf; otherwise the blocks of the
 * path would be allocated in reverse order.
 *
 * FIXME: We could allocate/copy blocks before changing the common
 * ancestor (until the common ancestor is changed, the changes are not
 * visible to readers).  With this, we may be able to reduce locking
 * time.
 */
int cursor_redirect(struct cursor *cursor)
{
	struct btree *btree = cursor->btree;
	struct sb *sb = btree->sb;
	int level;

	for (level = 0; level <= btree->root.depth; level++) {
		struct buffer_head *buffer, *clone;
		block_t parent, oldblock, newblock;
		struct index_entry *entry;
		int redirect, is_leaf = (level == btree->root.depth);

		buffer = cursor->path[level].buffer;
		/* If buffer needs to redirect to dirty, redirect it */
		if (is_leaf)
			redirect = leaf_need_redirect(sb, buffer);
		else
			redirect = bnode_need_redirect(sb, buffer);
		/* No need to redirect */
		if (!redirect)
			continue;

		/* Redirect buffer before changing */
		clone = new_block(btree);
		if (IS_ERR(clone))
			return PTR_ERR(clone);
		oldblock = bufindex(buffer);
		newblock = bufindex(clone);
		trace("redirect %Lx to %Lx", oldblock, newblock);
		level_redirect_blockput(cursor, level, clone);
		if (is_leaf) {
			/* This is a leaf buffer */
			mark_buffer_dirty_atomic(clone);
			log_leaf_redirect(sb, oldblock, newblock);
			defer_bfree(&sb->defree, oldblock, 1);
		} else {
			/* This is a bnode buffer */
			mark_buffer_unify_atomic(clone);
			log_bnode_redirect(sb, oldblock, newblock);
			defer_bfree(&sb->deunify, oldblock, 1);
		}

		trace("update parent");
		if (!level) {
			/* Update pointer in btree->root */
			trace("redirect root");
			assert(oldblock == btree->root.block);
			btree->root.block = newblock;
			tux3_mark_btree_dirty(btree);
			continue;
		}

		/* Update entry on parent for the redirected block */
		parent = bufindex(cursor->path[level - 1].buffer);
		entry = cursor->path[level - 1].next - 1;
		entry->block = cpu_to_be64(newblock);
		log_bnode_update(sb, parent, newblock, be64_to_cpu(entry->key));
	}

	cursor_check(cursor);
	return 0;
}
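/*
 * Aside: a minimal standalone sketch (hypothetical demo_* names, not
 * tux3's API) of the root-to-leaf redirect order used above.  Because
 * the parent at level - 1 is redirected before its child, the pointer
 * update into the parent always lands in a block that is already
 * dirty, never in a clean block still shared with the on-disk tree.
 */
#include <stdio.h>

#define DEMO_DEPTH 2			/* root bnode + interior bnode + leaf */

struct demo_level {
	long block;			/* current block address of this node */
	int dirty;			/* already redirected this transaction? */
	long child;			/* pointer down to the next level */
};

static long demo_next_free = 1000;	/* trivial stand-in block allocator */

static void demo_redirect_path(struct demo_level *path)
{
	for (int level = 0; level <= DEMO_DEPTH; level++) {
		if (path[level].dirty)
			continue;	/* nothing to redirect at this level */

		long oldblock = path[level].block;
		long newblock = demo_next_free++;	/* "clone" the node */

		path[level].block = newblock;
		path[level].dirty = 1;
		printf("level %d: redirect %ld to %ld\n",
		       level, oldblock, newblock);

		if (level == 0)
			continue;	/* root pointer lives outside the path */

		/* The parent was redirected first, so this write is safe */
		path[level - 1].child = newblock;
	}
}

int main(void)
{
	struct demo_level path[DEMO_DEPTH + 1] = {
		{ .block = 1, .child = 2 },	/* root bnode */
		{ .block = 2, .child = 3 },	/* interior bnode */
		{ .block = 3 },			/* leaf */
	};

	demo_redirect_path(path);
	return 0;
}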
/*
 * Alternate variant of cursor_redirect(): walks the path from leaf to
 * root and uses the rollup naming (mark_buffer_rollup_atomic,
 * sb->derollup) instead of unify.
 */
int cursor_redirect(struct cursor *cursor)
{
	struct btree *btree = cursor->btree;
	unsigned level = btree->root.depth;
	struct sb *sb = btree->sb;
	block_t uninitialized_var(child);

	while (1) {
		struct buffer_head *buffer;
		block_t uninitialized_var(oldblock);
		block_t uninitialized_var(newblock);
		int redirect, is_leaf = (level == btree->root.depth);

		buffer = cursor->path[level].buffer;
		/* If buffer needs to redirect to dirty, redirect it */
		if (is_leaf)
			redirect = leaf_need_redirect(sb, buffer);
		else
			redirect = bnode_need_redirect(sb, buffer);

		if (redirect) {
			/* Redirect buffer before changing */
			struct buffer_head *clone = new_block(btree);
			if (IS_ERR(clone))
				return PTR_ERR(clone);
			oldblock = bufindex(buffer);
			newblock = bufindex(clone);
			trace("redirect %Lx to %Lx", oldblock, newblock);
			level_redirect_blockput(cursor, level, clone);
			if (is_leaf) {
				/* This is a leaf buffer */
				mark_buffer_dirty_atomic(clone);
				log_leaf_redirect(sb, oldblock, newblock);
				defer_bfree(&sb->defree, oldblock, 1);
				goto parent_level;
			}
			/* This is a bnode buffer */
			mark_buffer_rollup_atomic(clone);
			log_bnode_redirect(sb, oldblock, newblock);
			defer_bfree(&sb->derollup, oldblock, 1);
		} else {
			if (is_leaf) {
				/* This is a leaf buffer */
				goto parent_level;
			}
		}

		/* Update entry for the redirected child block */
		trace("update parent");
		block_t block = bufindex(cursor->path[level].buffer);
		struct index_entry *entry = cursor->path[level].next - 1;
		entry->block = cpu_to_be64(child);
		log_bnode_update(sb, block, child, be64_to_cpu(entry->key));

parent_level:
		/* If this buffer is already redirected, its ancestors are too */
		if (!redirect) {
			cursor_check(cursor);
			return 0;
		}

		if (!level--) {
			trace("redirect root");
			assert(oldblock == btree->root.block);
			btree->root.block = newblock;
			tux3_mark_btree_dirty(btree);
			cursor_check(cursor);
			return 0;
		}
		child = newblock;
	}
}
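/*
 * Aside: both variants defer freeing the old block with defer_bfree()
 * instead of freeing it immediately, since the committed on-disk tree
 * still references the old block until the redirected tree becomes
 * durable.  Below is a minimal sketch of such a deferred-free list
 * (hypothetical demo_* names, not tux3's defer_bfree() implementation).
 */
#include <errno.h>
#include <stdlib.h>

struct demo_defree {
	long block;			/* first block to free later */
	unsigned count;			/* number of contiguous blocks */
	struct demo_defree *next;
};

/* Queue blocks to be freed after the current transaction commits */
static int demo_defer_bfree(struct demo_defree **list, long block,
			    unsigned count)
{
	struct demo_defree *item = malloc(sizeof(*item));
	if (!item)
		return -ENOMEM;
	item->block = block;
	item->count = count;
	item->next = *list;
	*list = item;
	return 0;
}

/* Called after commit: the old blocks are now truly unreferenced */
static void demo_flush_defree(struct demo_defree **list)
{
	while (*list) {
		struct demo_defree *item = *list;
		*list = item->next;
		/* a real allocator would free item->count blocks here */
		free(item);
	}
}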