/*
 * Split leaf, then insert the new key into the parent.
 * key: key to add after split (the cursor will point to the leaf that
 * contains key)
 * hint: hint for split
 *
 * return value:
 * 0 - success
 * < 0 - error
 */
static int btree_leaf_split(struct cursor *cursor, tuxkey_t key,
			    tuxkey_t hint)
{
	trace("split leaf");

	struct btree *btree = cursor->btree;
	struct buffer_head *newbuf;

	newbuf = new_leaf(btree);
	if (IS_ERR(newbuf))
		return PTR_ERR(newbuf);
	log_balloc(btree->sb, bufindex(newbuf), 1);

	struct buffer_head *leafbuf = cursor_leafbuf(cursor);
	tuxkey_t newkey = btree->ops->leaf_split(btree, hint, bufdata(leafbuf),
						 bufdata(newbuf));
	assert(cursor_this_key(cursor) < newkey);
	assert(newkey < cursor_next_key(cursor));

	if (key < newkey)
		mark_buffer_dirty_non(newbuf);
	else
		mark_buffer_dirty_non(leafbuf);

	return insert_leaf(cursor, newkey, newbuf, key < newkey);
}
static loff_t unatom_dict_write(struct inode *atable, atom_t atom,
				loff_t where)
{
	unsigned delta = tux3_get_current_delta();
	struct buffer_head *buffer, *clone;
	loff_t old;
	unsigned offset;

	buffer = blockread_unatom(atable, atom, &offset);
	if (!buffer)
		return -EIO;

	/*
	 * The atable is protected by i_mutex for now.
	 * blockdirty() should never return -EAGAIN.
	 * FIXME: need finer granularity locking
	 */
	clone = blockdirty(buffer, delta);
	if (IS_ERR(clone)) {
		assert(PTR_ERR(clone) != -EAGAIN);
		blockput(buffer);
		return PTR_ERR(clone);
	}

	__be64 *unatom_dict = bufdata(clone);
	old = be64_to_cpu(unatom_dict[offset]);
	unatom_dict[offset] = cpu_to_be64(where);
	mark_buffer_dirty_non(clone);
	blockput(clone);

	return old;
}
/* Modify the refcount in its buffer, then release the buffer */
static int update_refcount(struct sb *sb, struct buffer_head *buffer,
			   unsigned offset, u16 val)
{
	unsigned delta = tux3_get_current_delta();
	struct buffer_head *clone;
	__be16 *refcount;

	/*
	 * The atable is protected by i_mutex for now.
	 * blockdirty() should never return -EAGAIN.
	 * FIXME: need finer granularity locking
	 */
	clone = blockdirty(buffer, delta);
	if (IS_ERR(clone)) {
		assert(PTR_ERR(clone) != -EAGAIN);
		blockput(buffer);
		return PTR_ERR(clone);
	}

	refcount = bufdata(clone);
	refcount[offset] = cpu_to_be16(val);
	mark_buffer_dirty_non(clone);
	blockput(clone);

	return 0;
}
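
/*
 * The two functions above share one copy-on-write idiom: redirect the
 * buffer into the current delta with blockdirty(), modify the clone,
 * then mark it dirty and release it. A minimal sketch of that idiom,
 * using only the helpers already seen in this file (the function name
 * and the modify callback are hypothetical, for illustration):
 */
static int modify_block_sketch(struct buffer_head *buffer, unsigned delta,
			       void (*modify)(void *data))
{
	struct buffer_head *clone;

	/* Redirect to a clone owned by this delta; caller holds i_mutex */
	clone = blockdirty(buffer, delta);
	if (IS_ERR(clone)) {
		assert(PTR_ERR(clone) != -EAGAIN);
		blockput(buffer);
		return PTR_ERR(clone);
	}

	modify(bufdata(clone));		/* edit the clone, not the original */
	mark_buffer_dirty_non(clone);	/* queue for writeback in this delta */
	blockput(clone);

	return 0;
}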
static void tux_update_entry(struct buffer_head *buffer, tux_dirent *entry,
			     inum_t inum, umode_t mode)
{
	entry->inum = cpu_to_be64(inum);
	entry->type = tux_type_by_mode[(mode & S_IFMT) >> STAT_SHIFT];
	mark_buffer_dirty_non(buffer);
	blockput(buffer);
}
static void add_maps(struct inode *inode, block_t index,
		     struct block_segment *seg, int nr_segs)
{
	unsigned delta = tux3_get_current_delta();

	for (int i = 0; i < nr_segs; i++) {
		struct block_segment *s = &seg[i];

		for (unsigned j = 0; j < s->count; j++) {
			struct buffer_head *buf;

			/* Write one map block per physical block in segment */
			buf = blockget(inode->map, index + j);
			buf = blockdirty(buf, delta);
			memset(buf->data, 0, inode->i_sb->blocksize);
			*(block_t *)buf->data = s->block + j;
			mark_buffer_dirty_non(buf);
			blockput(buf);
		}
		index += s->count;
	}
}
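
/*
 * Hedged usage sketch for add_maps() above: map a 3-block physical
 * extent starting at physical block 1024 onto logical blocks 8..10.
 * The field names come from struct block_segment as used above; the
 * inode is assumed to be set up by the caller.
 */
static void add_maps_example(struct inode *inode)
{
	struct block_segment seg = { .block = 1024, .count = 3 };

	add_maps(inode, 8, &seg, 1);	/* logical 8,9,10 -> 1024,1025,1026 */
}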
int tux_delete_entry(struct inode *dir, struct buffer_head *buffer,
		     tux_dirent *entry)
{
	unsigned delta = tux3_get_current_delta();
	tux_dirent *prev = NULL, *this = bufdata(buffer);
	struct buffer_head *clone;
	void *olddata;

	while ((char *)this < (char *)entry) {
		if (this->rec_len == 0) {
			blockput(buffer);
			tux_zero_len_error(dir, bufindex(buffer));
			return -EIO;
		}
		prev = this;
		this = next_entry(this);
	}

	/*
	 * The directory is protected by i_mutex.
	 * blockdirty() should never return -EAGAIN.
	 */
	olddata = bufdata(buffer);
	clone = blockdirty(buffer, delta);
	if (IS_ERR(clone)) {
		assert(PTR_ERR(clone) != -EAGAIN);
		blockput(buffer);
		return PTR_ERR(clone);
	}
	entry = ptr_redirect(entry, olddata, bufdata(clone));
	prev = ptr_redirect(prev, olddata, bufdata(clone));

	if (prev)
		prev->rec_len = tux_rec_len_to_disk((void *)next_entry(entry) -
						    (void *)prev);
	memset(entry->name, 0, entry->name_len);
	entry->name_len = entry->type = 0;
	entry->inum = 0;
	mark_buffer_dirty_non(clone);
	blockput(clone);

	return 0;
}
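
/*
 * blockdirty() may hand back a clone at a different address, so raw
 * pointers into the old buffer must be rebased before use, as done
 * above for entry and prev. A minimal sketch of what ptr_redirect()
 * is assumed to do (an offset-preserving rebase from the old data
 * area to the clone's; the real definition lives elsewhere in the
 * tree):
 */
static inline void *ptr_redirect_sketch(void *ptr, void *olddata,
					void *newdata)
{
	if (!ptr)
		return NULL;	/* e.g. prev may still be NULL above */
	return (char *)newdata + ((char *)ptr - (char *)olddata);
}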
int btree_write(struct cursor *cursor, struct btree_key_range *key)
{
	struct btree *btree = cursor->btree;
	struct btree_ops *ops = btree->ops;
	tuxkey_t split_hint;
	int err;

	/* FIXME: it might be better to support multiple leaves */

	err = cursor_redirect(cursor);
	if (err)
		return err;

	while (key->len > 0) {
		tuxkey_t bottom = cursor_this_key(cursor);
		tuxkey_t limit = cursor_next_key(cursor);
		void *leaf = bufdata(cursor_leafbuf(cursor));
		int need_split;

		assert(bottom <= key->start && key->start < limit);
		assert(ops->leaf_sniff(btree, leaf));

		need_split = ops->leaf_write(btree, bottom, limit, leaf, key,
					     &split_hint);
		if (need_split < 0)
			return need_split;
		else if (!need_split) {
			mark_buffer_dirty_non(cursor_leafbuf(cursor));
			continue;
		}

		err = btree_leaf_split(cursor, key->start, split_hint);
		if (err)
			return err;	/* FIXME: error handling */
	}

	return 0;
}
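
/*
 * Hedged usage sketch for btree_write() above: a typical caller probes
 * the cursor to the start key under the btree lock, writes the range,
 * then releases everything. The helper names are the ones used by
 * btree_chop() below; reserving one extra cursor level for a possible
 * split is an assumption about alloc_cursor()'s second argument.
 */
static int btree_write_example(struct btree *btree,
			       struct btree_key_range *key)
{
	struct cursor *cursor;
	int err;

	cursor = alloc_cursor(btree, 1);	/* room for one split */
	if (!cursor)
		return -ENOMEM;

	down_write(&btree->lock);
	err = btree_probe(cursor, key->start);
	if (!err) {
		err = btree_write(cursor, key);
		release_cursor(cursor);
	}
	up_write(&btree->lock);
	free_cursor(cursor);

	return err;
}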
/*
 * This is range deletion, so instead of rebalancing space across
 * sibling nodes for each change, it just removes the range and merges
 * from right to left, even across nodes that do not share a parent.
 *
 *            +---------------- (A, B, C) ----------------+
 *            |                     |                     |
 *    +-- (AA, AB, AC) --+  +- (BA, BB, BC) -+    + (CA, CB, CC) +
 *    |        |         |  |       |        |    |      |       |
 * (AAA,AAB)(ABA,ABB)(ACA,ACB) (BAA,BAB)(BBA)(BCA,BCB) (CAA)(CBA,CBB)(CCA)
 *
 * [less : A, AA, AAA, AAB, AB, ABA, ABB, AC, ACA, ACB, B, BA ... : greater]
 *
 * If we merged from a cousin (or re-distributed), we may have to
 * update the index all the way up to the common parent (e.g. if (ACB)
 * was removed, then (BAA,BAB) merged into (ACA), we have to adjust B
 * in the root node to BB).
 *
 * See adjust_parent_sep().
 *
 * FIXME: no re-distribution, so we don't guarantee more than 50%
 * space efficiency. And if the range extends to the end of the
 * keyspace (the truncate() case), we need neither the merge nor
 * adjust_parent_sep().
 *
 * FIXME2: we may want to split the chop work into steps, instead of
 * blocking for a long time.
 */
int btree_chop(struct btree *btree, tuxkey_t start, u64 len)
{
	struct sb *sb = btree->sb;
	struct btree_ops *ops = btree->ops;
	struct buffer_head **prev, *leafprev = NULL;
	struct chopped_index_info *cii;
	struct cursor *cursor;
	tuxkey_t limit;
	int ret, done = 0;

	if (!has_root(btree))
		return 0;

	/* Chop the whole range if len >= TUXKEY_LIMIT */
	limit = (len >= TUXKEY_LIMIT) ? TUXKEY_LIMIT : start + len;

	prev = malloc(sizeof(*prev) * btree->root.depth);
	if (prev == NULL)
		return -ENOMEM;
	memset(prev, 0, sizeof(*prev) * btree->root.depth);

	cii = malloc(sizeof(*cii) * btree->root.depth);
	if (cii == NULL) {
		ret = -ENOMEM;
		goto error_cii;
	}
	memset(cii, 0, sizeof(*cii) * btree->root.depth);

	cursor = alloc_cursor(btree, 0);
	if (!cursor) {
		ret = -ENOMEM;
		goto error_alloc_cursor;
	}

	down_write(&btree->lock);
	ret = btree_probe(cursor, start);
	if (ret)
		goto error_btree_probe;

	/* Walk leaves */
	while (1) {
		struct buffer_head *leafbuf;
		tuxkey_t this_key;

		/*
		 * FIXME: If the leaf is merged and freed later, we
		 * don't need to redirect the leaf and leaf_chop()
		 */
		if ((ret = cursor_redirect(cursor)))
			goto out;
		leafbuf = cursor_pop(cursor);

		/* Adjust start and len for this leaf */
		this_key = cursor_level_this_key(cursor);
		if (start < this_key) {
			if (limit < TUXKEY_LIMIT)
				len -= this_key - start;
			start = this_key;
		}

		ret = ops->leaf_chop(btree, start, len, bufdata(leafbuf));
		if (ret) {
			if (ret < 0) {
				blockput(leafbuf);
				goto out;
			}
			mark_buffer_dirty_non(leafbuf);
		}

		/* Try to merge this leaf with prev */
		if (leafprev) {
			if (try_leaf_merge(btree, leafprev, leafbuf)) {
				trace(">>> can merge leaf %p into leaf %p",
				      leafbuf, leafprev);
				remove_index(cursor, cii);
				mark_buffer_dirty_non(leafprev);
				blockput_free(sb, leafbuf);
				goto keep_prev_leaf;
			}
			blockput(leafprev);
		}
		leafprev = leafbuf;

keep_prev_leaf:
		if (cursor_level_next_key(cursor) >= limit)
			done = 1;
		/* Pop and try to merge finished nodes */
		while (done || cursor_level_finished(cursor)) {
			struct buffer_head *buf;
			int level = cursor->level;
			struct chopped_index_info *ciil = &cii[level];

			/* Get merge src buffer, and go up to parent level */
			buf = cursor_pop(cursor);

			/*
			 * Log chopped indexes
			 * FIXME: If the node is freed later (e.g. merged),
			 * we don't need to log this
			 */
			if (ciil->count) {
				log_bnode_del(sb, bufindex(buf), ciil->start,
					      ciil->count);
			}
			memset(ciil, 0, sizeof(*ciil));

			/* Try to merge the node with prev */
			if (prev[level]) {
				assert(level);
				if (try_bnode_merge(sb, prev[level], buf)) {
					trace(">>> can merge node %p into node %p",
					      buf, prev[level]);
					remove_index(cursor, cii);
					mark_buffer_unify_non(prev[level]);
					blockput_free_unify(sb, buf);
					goto keep_prev_node;
				}
				blockput(prev[level]);
			}
			prev[level] = buf;

keep_prev_node:
			if (!level)
				goto chop_root;
		}

		/* Push back down to leaf level */
		do {
			ret = cursor_advance_down(cursor);
			if (ret < 0)
				goto out;
		} while (ret);
	}

chop_root:
	/* Remove depth if possible */
	while (btree->root.depth > 1 && bcount(bufdata(prev[0])) == 1) {
		trace("drop btree level");
		btree->root.block = bufindex(prev[1]);
		btree->root.depth--;
		tux3_mark_btree_dirty(btree);

		/*
		 * We know prev[0] is redirected and dirty. So, here,
		 * we can just cancel bnode_redirect by bfree(),
		 * instead of defered_bfree().
		 * FIXME: we could optimize by freeing the bnode
		 * without bnode_redirect, and if we do, this is no
		 * longer true.
		 */
		bfree(sb, bufindex(prev[0]), 1);
		log_bnode_free(sb, bufindex(prev[0]));
		blockput_free_unify(sb, prev[0]);

		vecmove(prev, prev + 1, btree->root.depth);
	}
	ret = 0;

out:
	if (leafprev)
		blockput(leafprev);
	for (int i = 0; i < btree->root.depth; i++) {
		if (prev[i])
			blockput(prev[i]);
	}
	release_cursor(cursor);
error_btree_probe:
	up_write(&btree->lock);

	free_cursor(cursor);
error_alloc_cursor:
	free(cii);
error_cii:
	free(prev);

	return ret;
}
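
/*
 * Hedged usage sketch for btree_chop() above: truncate-style callers
 * chop everything from a start key to the end of the keyspace, which
 * is exactly the len >= TUXKEY_LIMIT "chop all" path (so no merging
 * or adjust_parent_sep() is needed past the chopped tail).
 */
static int chop_tail_example(struct btree *btree, tuxkey_t start)
{
	return btree_chop(btree, start, TUXKEY_LIMIT);
}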
int alloc_empty_btree(struct btree *btree)
{
	struct sb *sb = btree->sb;
	struct buffer_head *rootbuf = new_node(btree);
	if (IS_ERR(rootbuf))
		goto error;
	struct buffer_head *leafbuf = new_leaf(btree);
	if (IS_ERR(leafbuf))
		goto error_leafbuf;

	assert(!has_root(btree));
	struct bnode *rootnode = bufdata(rootbuf);
	block_t rootblock = bufindex(rootbuf);
	block_t leafblock = bufindex(leafbuf);
	trace("root at %Lx", rootblock);
	trace("leaf at %Lx", leafblock);
	bnode_init_root(rootnode, 1, leafblock, 0, 0);
	log_bnode_root(sb, rootblock, 1, leafblock, 0, 0);
	log_balloc(sb, leafblock, 1);

	mark_buffer_unify_non(rootbuf);
	blockput(rootbuf);
	mark_buffer_dirty_non(leafbuf);
	blockput(leafbuf);

	btree->root = (struct root){ .block = rootblock, .depth = 1 };
	tux3_mark_btree_dirty(btree);

	return 0;

error_leafbuf:
	(btree->ops->bfree)(sb, bufindex(rootbuf), 1);
	blockput(rootbuf);
	rootbuf = leafbuf;
error:
	return PTR_ERR(rootbuf);
}

/* FIXME: right? and this should be done by btree_chop()? */
int free_empty_btree(struct btree *btree)
{
	struct btree_ops *ops = btree->ops;

	if (!has_root(btree))
		return 0;

	assert(btree->root.depth == 1);
	struct sb *sb = btree->sb;
	struct buffer_head *rootbuf = vol_bread(sb, btree->root.block);
	if (!rootbuf)
		return -EIO;
	assert(bnode_sniff(bufdata(rootbuf)));

	/* Leave the btree with no root */
	btree->root = no_root;
	tux3_mark_btree_dirty(btree);

	struct bnode *rootnode = bufdata(rootbuf);
	assert(bcount(rootnode) == 1);
	block_t leaf = be64_to_cpu(rootnode->entries[0].block);
	struct buffer_head *leafbuf = vol_find_get_block(sb, leaf);

	if (leafbuf && !leaf_need_redirect(sb, leafbuf)) {
		/*
		 * This is a redirected leaf. So, here, we can just
		 * cancel leaf_redirect by bfree(), instead of
		 * defered_bfree().
		 */
		bfree(sb, leaf, 1);
		log_leaf_free(sb, leaf);
		assert(ops->leaf_can_free(btree, bufdata(leafbuf)));
		blockput_free(sb, leafbuf);
	} else {
		defer_bfree(&sb->defree, leaf, 1);
		log_bfree(sb, leaf, 1);
		if (leafbuf) {
			assert(ops->leaf_can_free(btree, bufdata(leafbuf)));
			blockput(leafbuf);
		}
	}

	if (!bnode_need_redirect(sb, rootbuf)) {
		/*
		 * This is a redirected bnode. So, here, we can just
		 * cancel bnode_redirect by bfree(), instead of
		 * defered_bfree().
		 */
		bfree(sb, bufindex(rootbuf), 1);
		log_bnode_free(sb, bufindex(rootbuf));
		blockput_free_unify(sb, rootbuf);
	} else {
		defer_bfree(&sb->deunify, bufindex(rootbuf), 1);
		log_bfree_on_unify(sb, bufindex(rootbuf), 1);
		blockput(rootbuf);
	}

	return 0;
}

int replay_bnode_redirect(struct replay *rp, block_t oldblock,
			  block_t newblock)
{
	struct sb *sb = rp->sb;
	struct buffer_head *newbuf, *oldbuf;
	int err = 0;

	newbuf = vol_getblk(sb, newblock);
	if (!newbuf) {
		err = -ENOMEM;	/* FIXME: error code */
		goto error;
	}
	oldbuf = vol_bread(sb, oldblock);
	if (!oldbuf) {
		err = -EIO;	/* FIXME: error code */
		goto error_put_newbuf;
	}
	assert(bnode_sniff(bufdata(oldbuf)));

	memcpy(bufdata(newbuf), bufdata(oldbuf), bufsize(newbuf));
	mark_buffer_unify_atomic(newbuf);
	blockput(oldbuf);

error_put_newbuf:
	blockput(newbuf);
error:
	return err;
}

int replay_bnode_root(struct replay *rp, block_t root, unsigned count,
		      block_t left, block_t right, tuxkey_t rkey)
{
	struct sb *sb = rp->sb;
	struct buffer_head *rootbuf;

	rootbuf = vol_getblk(sb, root);
	if (!rootbuf)
		return -ENOMEM;

	bnode_buffer_init(rootbuf);
	bnode_init_root(bufdata(rootbuf), count, left, right, rkey);

	mark_buffer_unify_atomic(rootbuf);
	blockput(rootbuf);

	return 0;
}

/*
 * Before this replay, replay should already have dirtied the buffer
 * of src (e.g. by redirect).
 */
int replay_bnode_split(struct replay *rp, block_t src, unsigned pos,
		       block_t dst)
{
	struct sb *sb = rp->sb;
	struct buffer_head *srcbuf, *dstbuf;
	int err = 0;

	srcbuf = vol_getblk(sb, src);
	if (!srcbuf) {
		err = -ENOMEM;	/* FIXME: error code */
		goto error;
	}
	dstbuf = vol_getblk(sb, dst);
	if (!dstbuf) {
		err = -ENOMEM;	/* FIXME: error code */
		goto error_put_srcbuf;
	}
	bnode_buffer_init(dstbuf);

	bnode_split(bufdata(srcbuf), pos, bufdata(dstbuf));

	mark_buffer_unify_non(srcbuf);
	mark_buffer_unify_atomic(dstbuf);
	blockput(dstbuf);

error_put_srcbuf:
	blockput(srcbuf);
error:
	return err;
}
/*
 * Before this replay, replay should already have dirtied the buffer
 * of bnodeblock (e.g. by redirect).
 */
static int replay_bnode_change(struct sb *sb, block_t bnodeblock,
			       u64 val1, u64 val2,
			       void (*change)(struct bnode *, u64, u64))
{
	struct buffer_head *bnodebuf;

	bnodebuf = vol_getblk(sb, bnodeblock);
	if (!bnodebuf)
		return -ENOMEM;	/* FIXME: error code */

	struct bnode *bnode = bufdata(bnodebuf);
	change(bnode, val1, val2);

	mark_buffer_unify_non(bnodebuf);
	blockput(bnodebuf);

	return 0;
}

static void add_func(struct bnode *bnode, u64 child, u64 key)
{
	struct index_entry *entry = bnode_lookup(bnode, key) + 1;
	bnode_add_index(bnode, entry, child, key);
}

int replay_bnode_add(struct replay *rp, block_t parent, block_t child,
		     tuxkey_t key)
{
	return replay_bnode_change(rp->sb, parent, child, key, add_func);
}

static void update_func(struct bnode *bnode, u64 child, u64 key)
{
	struct index_entry *entry = bnode_lookup(bnode, key);
	assert(be64_to_cpu(entry->key) == key);
	entry->block = cpu_to_be64(child);
}

int replay_bnode_update(struct replay *rp, block_t parent, block_t child,
			tuxkey_t key)
{
	return replay_bnode_change(rp->sb, parent, child, key, update_func);
}

int replay_bnode_merge(struct replay *rp, block_t src, block_t dst)
{
	struct sb *sb = rp->sb;
	struct buffer_head *srcbuf, *dstbuf;
	int err = 0, ret;

	srcbuf = vol_getblk(sb, src);
	if (!srcbuf) {
		err = -ENOMEM;	/* FIXME: error code */
		goto error;
	}
	dstbuf = vol_getblk(sb, dst);
	if (!dstbuf) {
		err = -ENOMEM;	/* FIXME: error code */
		goto error_put_srcbuf;
	}

	ret = bnode_merge_nodes(sb, bufdata(dstbuf), bufdata(srcbuf));
	assert(ret == 1);

	mark_buffer_unify_non(dstbuf);
	mark_buffer_unify_non(srcbuf);
	blockput(dstbuf);

error_put_srcbuf:
	blockput(srcbuf);
error:
	return err;
}

static void del_func(struct bnode *bnode, u64 key, u64 count)
{
	struct index_entry *entry = bnode_lookup(bnode, key);
	assert(be64_to_cpu(entry->key) == key);
	bnode_remove_index(bnode, entry, count);
}

int replay_bnode_del(struct replay *rp, block_t bnode, tuxkey_t key,
		     unsigned count)
{
	return replay_bnode_change(rp->sb, bnode, key, count, del_func);
}

static void adjust_func(struct bnode *bnode, u64 from, u64 to)
{
	struct index_entry *entry = bnode_lookup(bnode, from);
	assert(be64_to_cpu(entry->key) == from);
	entry->key = cpu_to_be64(to);
}

int replay_bnode_adjust(struct replay *rp, block_t bnode, tuxkey_t from,
			tuxkey_t to)
{
	return replay_bnode_change(rp->sb, bnode, from, to, adjust_func);
}
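
/*
 * Hedged sketch of how the replay_bnode_* ops above pair with the log
 * records written at runtime: e.g. btree_chop() emits log_bnode_del()
 * for each chopped index range, and a log-replay dispatcher maps that
 * record back onto replay_bnode_del(). The record layout and the
 * LOG_BNODE_DEL tag value here are illustrative assumptions, not the
 * real on-disk log format:
 */
struct example_log_rec {
	u8 tag;			/* e.g. a hypothetical LOG_BNODE_DEL */
	block_t bnode;
	tuxkey_t key;
	unsigned count;
};

static int example_replay_one(struct replay *rp, struct example_log_rec *rec)
{
	switch (rec->tag) {
	case 1:	/* hypothetical LOG_BNODE_DEL */
		return replay_bnode_del(rp, rec->bnode, rec->key, rec->count);
	default:
		return -EINVAL;
	}
}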