int probe(struct btree *btree, tuxkey_t key, struct cursor *cursor)
{
	unsigned i, depth = btree->root.depth;
	struct buffer_head *buffer = sb_bread(vfs_sb(btree->sb), btree->root.block);
	if (!buffer)
		return -EIO;
	struct bnode *node = bufdata(buffer);

	for (i = 0; i < depth; i++) {
		struct index_entry *next = node->entries, *top = next + bcount(node);
		while (++next < top) /* binary search goes here */
			if (from_be_u64(next->key) > key)
				break;
		trace("probe level %i, %ti of %i", i, next - node->entries, bcount(node));
		level_push(cursor, buffer, next);
		if (!(buffer = sb_bread(vfs_sb(btree->sb), from_be_u64((next - 1)->block))))
			goto eek;
		node = (struct bnode *)bufdata(buffer);
	}
	assert((btree->ops->leaf_sniff)(btree, bufdata(buffer)));
	level_push(cursor, buffer, NULL);
	cursor_check(cursor);
	return 0;
eek:
	release_cursor(cursor);
	return -EIO; /* stupid, it might have been NOMEM */
}
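/*
 * Usage sketch (not from the original source; assumes alloc_cursor() and
 * free_cursor() behave as in tree_chop() below, and that release_cursor()
 * drops whatever levels are still held on the path): probe() loads one bnode
 * buffer per level plus the leaf on top, so a read-only caller might look
 * roughly like this hypothetical helper.
 */
static int example_read_leaf(struct btree *btree, tuxkey_t key)
{
	struct cursor *cursor = alloc_cursor(btree, 0);
	int err;

	if (!cursor)
		return -ENOMEM;
	err = probe(btree, key, cursor);
	if (!err) {
		struct buffer_head *leafbuf = level_pop(cursor);
		/* ... inspect bufdata(leafbuf) via btree->ops ... */
		brelse(leafbuf);
		release_cursor(cursor);
	}
	free_cursor(cursor);
	return err;
}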
static void bnode_split(struct bnode *src, unsigned pos, struct bnode *dst)
{
	dst->count = cpu_to_be32(bcount(src) - pos);
	src->count = cpu_to_be32(pos);
	memcpy(&dst->entries[0], &src->entries[pos],
	       bcount(dst) * sizeof(struct index_entry));
}
/* Lookup the index entry that contains key */
static struct index_entry *bnode_lookup(struct bnode *node, tuxkey_t key)
{
	struct index_entry *next = node->entries, *top = next + bcount(node);

	assert(bcount(node) > 0);
	/* binary search goes here */
	while (++next < top) {
		if (be64_to_cpu(next->key) > key)
			break;
	}
	return next - 1;
}
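/*
 * The "binary search goes here" comment above marks the linear scan as a
 * placeholder.  A minimal sketch of a binary-search variant with the same
 * result convention (return the rightmost entry whose key is <= the search
 * key, falling back to the first entry) might look like this; the helper
 * name is hypothetical and not part of the original source.
 */
static struct index_entry *bnode_lookup_bsearch(struct bnode *node, tuxkey_t key)
{
	unsigned lo = 1, hi = bcount(node);	/* entries[0] is the fallback */

	assert(bcount(node) > 0);
	while (lo < hi) {
		unsigned mid = lo + (hi - lo) / 2;
		if (be64_to_cpu(node->entries[mid].key) <= key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return &node->entries[lo - 1];
}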
static void bnode_split(struct bnode *src, unsigned pos, struct bnode *dst)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	dst->count = cpu_to_be32(bcount(src) - pos);
	src->count = cpu_to_be32(pos);
	memcpy(&dst->entries[0], &src->entries[pos],
	       bcount(dst) * sizeof(struct index_entry));
}
static int bnode_merge_nodes(struct sb *sb, struct bnode *into,
			     struct bnode *from)
{
	unsigned into_count = bcount(into), from_count = bcount(from);

	if (from_count + into_count > sb->entries_per_node)
		return 0;

	veccopy(&into->entries[into_count], from->entries, from_count);
	into->count = cpu_to_be32(into_count + from_count);

	return 1;
}
/* Lookup the index entry that contains key */
static struct index_entry *bnode_lookup(struct bnode *node, tuxkey_t key)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	struct index_entry *next = node->entries, *top = next + bcount(node);

	assert(bcount(node) > 0);
	/* binary search goes here */
	while (++next < top) {
		if (be64_to_cpu(next->key) > key)
			break;
	}
	return next - 1;
}
static void cursor_check(struct cursor *cursor)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	if (cursor->level == -1)
		return;

	tuxkey_t key = 0;
	block_t block = cursor->btree->root.block;

	for (int i = 0; i <= cursor->level; i++) {
		assert(bufindex(cursor->path[i].buffer) == block);
		if (i == cursor->level)
			break;

		struct bnode *bnode = level_node(cursor, i);
		struct index_entry *entry = cursor->path[i].next - 1;
		assert(bnode->entries <= entry);
		assert(entry < bnode->entries + bcount(bnode));
		/*
		 * If this entry is the leftmost one, it should have the same
		 * key as the parent.  Otherwise, the leftmost key may not be
		 * correct as the next key.
		 */
		if (bnode->entries == entry)
			assert(be64_to_cpu(entry->key) == key);
		else
			assert(be64_to_cpu(entry->key) > key);

		block = be64_to_cpu(entry->block);
		key = be64_to_cpu(entry->key);
	}
}
static void remove_index(struct cursor *cursor, int level)
{
	struct bnode *node = cursor_node(cursor, level);
	int count = bcount(node), i;

	/* stomps the node count (if 0th key holds count) */
	memmove(cursor->path[level].next - 1, cursor->path[level].next,
		(char *)&node->entries[count] - (char *)cursor->path[level].next);
	node->count = to_be_u32(count - 1);
	--(cursor->path[level].next);
	mark_buffer_dirty(cursor->path[level].buffer);

	/* no separator for last entry */
	if (level_finished(cursor, level))
		return;
	/*
	 * Climb up to common parent and set separating key to deleted key.
	 * What if index is now empty?  (no deleted key)
	 * Then some key above is going to be deleted and used to set sep.
	 * Climb the cursor while at first entry, bail out at root;
	 * find the node with the old sep, set it to deleted key.
	 */
	if (cursor->path[level].next == node->entries && level) {
		be_u64 sep = (cursor->path[level].next)->key;
		for (i = level - 1; cursor->path[i].next - 1 == cursor_node(cursor, i)->entries; i--)
			if (!i)
				return;
		(cursor->path[i].next - 1)->key = sep;
		mark_buffer_dirty(cursor->path[i].buffer);
	}
}
static void bnode_remove_index(struct bnode *node, struct index_entry *p,
			       int count)
{
	unsigned total = bcount(node);
	void *end = node->entries + total;

	memmove(p, p + count, end - (void *)(p + count));
	node->count = cpu_to_be32(total - count);
}
static int bnode_merge_nodes(struct sb *sb, struct bnode *into,
			     struct bnode *from)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	unsigned into_count = bcount(into), from_count = bcount(from);

	if (from_count + into_count > sb->entries_per_node)
		return 0;

	veccopy(&into->entries[into_count], from->entries, from_count);
	into->count = cpu_to_be32(into_count + from_count);

	return 1;
}
static void bnode_add_index(struct bnode *node, struct index_entry *p,
			    block_t child, u64 childkey)
{
	unsigned count = bcount(node);

	vecmove(p + 1, p, node->entries + count - p);
	p->block = cpu_to_be64(child);
	p->key = cpu_to_be64(childkey);
	node->count = cpu_to_be32(count + 1);
}
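/*
 * Note on the helper above (comment added for clarity): p points at the slot
 * the new entry should occupy; vecmove() shifts the entries from p onward one
 * place to the right before the new (block, key) pair is written and the
 * count is bumped.  Callers that need to keep keys sorted compute p
 * accordingly, e.g. add_func() later in this file passes
 * bnode_lookup(bnode, key) + 1.
 */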
/* There is no next entry? */
static inline int level_finished(struct cursor *cursor, int level)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	struct bnode *node = level_node(cursor, level);
	return cursor->path[level].next == node->entries + bcount(node);
}
static void bnode_remove_index(struct bnode *node, struct index_entry *p,
			       int count)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	unsigned total = bcount(node);
	void *end = node->entries + total;

	memmove(p, p + count, end - (void *)(p + count));
	node->count = cpu_to_be32(total - count);
}
static void bnode_add_index(struct bnode *node, struct index_entry *p,
			    block_t child, u64 childkey)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	unsigned count = bcount(node);

	vecmove(p + 1, p, node->entries + count - p);
	p->block = cpu_to_be64(child);
	p->key = cpu_to_be64(childkey);
	node->count = cpu_to_be32(count + 1);
}
static void cursor_check(struct cursor *cursor)
{
	if (cursor->len == 0)
		return;

	tuxkey_t key = 0;
	block_t block = cursor->btree->root.block;

	for (int i = 0; i < cursor->len; i++) {
		assert(bufindex(cursor->path[i].buffer) == block);
		if (!cursor->path[i].next)
			break;

		struct bnode *node = cursor_node(cursor, i);
		assert(node->entries < cursor->path[i].next);
		assert(cursor->path[i].next <= node->entries + bcount(node));
		assert(from_be_u64((cursor->path[i].next - 1)->key) >= key);
		block = from_be_u64((cursor->path[i].next - 1)->block);
		key = from_be_u64((cursor->path[i].next - 1)->key);
	}
}
#include <iostream>
#include <string>
#include <vector>

int main() {
	// Reads strings a, b and c, then prints a rearrangement of a's letters
	// that maximizes the number of copies of b plus copies of c that can
	// be formed, emitting those copies first and any leftover letters after.
	const int N = 26;
	std::string a;
	getline(std::cin, a);
	std::string b;
	getline(std::cin, b);
	std::string c;
	getline(std::cin, c);

	// Letter frequencies of a, b and c.
	std::vector<size_t> acount(N, 0);
	for (size_t p = 0; p < a.size(); p++) { ++acount[a[p] - 'a']; }
	std::vector<size_t> bcount(N, 0);
	for (size_t p = 0; p < b.size(); p++) { ++bcount[b[p] - 'a']; }
	std::vector<size_t> ccount(N, 0);
	for (size_t p = 0; p < c.size(); p++) { ++ccount[c[p] - 'a']; }

	// Maximum number of copies of b that a's letters can supply.
	long maxB(a.size());
	for (size_t p = 0; p < N; p++) {
		if (bcount[p] > 0 && (acount[p] / bcount[p]) < maxB) {
			maxB = (acount[p] / bcount[p]);
		}
	}

	// Try every count nb of b copies; remaining letters go to copies of c.
	long maxSum(maxB), optB(maxB), optC(0);
	for (size_t nb = 0; nb <= maxB; nb++) {
		size_t candC(a.size());
		for (size_t p = 0; p < N; p++) {
			if (ccount[p] > 0 && ((acount[p] - nb * bcount[p]) / ccount[p]) < candC) {
				candC = (acount[p] - nb * bcount[p]) / ccount[p];
			}
		}
		if (nb + candC > maxSum) { maxSum = nb + candC; optB = nb; optC = candC; }
	}

	// Emit the chosen copies, then whatever letters remain.
	for (int p = 0; p < optB; p++) { std::cout << b; }
	for (int p = 0; p < optC; p++) { std::cout << c; }
	for (int p = 0; p < N; p++) {
		size_t rem = acount[p] - optB * bcount[p] - optC * ccount[p];
		while (rem--) { std::cout << char('a' + p); }
	}
	std::cout << std::endl;
	return 0;
}
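// Worked example (added for illustration, easy to check by hand): with the
// input lines "aabbccdd", "ab" and "cd", two copies of each pattern use every
// letter of a, so the program prints "ababcdcd".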
static void merge_nodes(struct bnode *node, struct bnode *node2)
{
	veccopy(&node->entries[bcount(node)], node2->entries, bcount(node2));
	node->count = to_be_u32(bcount(node) + bcount(node2));
}
/*
 * This is range deletion.  So, instead of rebalancing space between
 * sibling nodes on each change, this just removes the range and merges
 * from right to left, even if the nodes don't share the same parent.
 *
 *          +---------------- (A, B, C) ----------------+
 *          |                      |                    |
 *   +- (AA, AB, AC) -+    +- (BA, BB, BC) -+   +- (CA, CB, CC) -+
 *   |        |       |    |        |       |   |       |        |
 * (AAA,AAB)(ABA,ABB)(ACA,ACB) (BAA,BAB)(BBA)(BCA,BCB) (CAA)(CBA,CBB)(CCA)
 *
 * [less : A, AA, AAA, AAB, AB, ABA, ABB, AC, ACA, ACB, B, BA ... : greater]
 *
 * If we merged from a cousin (or re-distributed), we may have to update
 * the index up to the common parent.  (E.g. if (ACB) was removed and then
 * (BAA,BAB) was merged into (ACA), we have to adjust B in the root node
 * to BB.)
 *
 * See adjust_parent_sep().
 *
 * FIXME: no re-distribution, so we don't guarantee better than 50%
 * space efficiency.  And if the range runs to the end of the keyspace
 * (the truncate() case), we don't need to merge or adjust_parent_sep().
 *
 * FIXME2: we may want to split the chop work into steps instead of
 * blocking for a long time.
 */
int btree_chop(struct btree *btree, tuxkey_t start, u64 len)
{
	if (DEBUG_MODE_K == 1) {
		printf("\t\t\t\t%25s[K] %25s %4d #in\n", __FILE__, __func__, __LINE__);
	}
	struct sb *sb = btree->sb;
	struct btree_ops *ops = btree->ops;
	struct buffer_head **prev, *leafprev = NULL;
	struct chopped_index_info *cii;
	struct cursor *cursor;
	tuxkey_t limit;
	int ret, done = 0;

	if (!has_root(btree))
		return 0;

	/* Chop all range if len >= TUXKEY_LIMIT */
	limit = (len >= TUXKEY_LIMIT) ? TUXKEY_LIMIT : start + len;

	prev = malloc(sizeof(*prev) * btree->root.depth);
	if (prev == NULL)
		return -ENOMEM;
	memset(prev, 0, sizeof(*prev) * btree->root.depth);

	cii = malloc(sizeof(*cii) * btree->root.depth);
	if (cii == NULL) {
		ret = -ENOMEM;
		goto error_cii;
	}
	memset(cii, 0, sizeof(*cii) * btree->root.depth);

	cursor = alloc_cursor(btree, 0);
	if (!cursor) {
		ret = -ENOMEM;
		goto error_alloc_cursor;
	}

	down_write(&btree->lock);
	ret = btree_probe(cursor, start);
	if (ret)
		goto error_btree_probe;

	/* Walk leaves */
	while (1) {
		struct buffer_head *leafbuf;
		tuxkey_t this_key;

		/*
		 * FIXME: If leaf was merged and freed later, we don't
		 * need to redirect leaf and leaf_chop()
		 */
		if ((ret = cursor_redirect(cursor)))
			goto out;
		leafbuf = cursor_pop(cursor);

		/* Adjust start and len for this leaf */
		this_key = cursor_level_this_key(cursor);
		if (start < this_key) {
			if (limit < TUXKEY_LIMIT)
				len -= this_key - start;
			start = this_key;
		}

		ret = ops->leaf_chop(btree, start, len, bufdata(leafbuf));
		if (ret) {
			if (ret < 0) {
				blockput(leafbuf);
				goto out;
			}
			mark_buffer_dirty_non(leafbuf);
		}

		/* Try to merge this leaf with prev */
		if (leafprev) {
			if (try_leaf_merge(btree, leafprev, leafbuf)) {
				trace(">>> can merge leaf %p into leaf %p", leafbuf, leafprev);
				remove_index(cursor, cii);
				mark_buffer_dirty_non(leafprev);
				blockput_free(sb, leafbuf);
				goto keep_prev_leaf;
			}
			blockput(leafprev);
		}
		leafprev = leafbuf;

keep_prev_leaf:
		if (cursor_level_next_key(cursor) >= limit)
			done = 1;
		/* Pop and try to merge finished nodes */
		while (done || cursor_level_finished(cursor)) {
			struct buffer_head *buf;
			int level = cursor->level;
			struct chopped_index_info *ciil = &cii[level];

			/* Get merge src buffer, and go parent level */
			buf = cursor_pop(cursor);

			/*
			 * Logging chopped indexes
			 * FIXME: If node is freed later (e.g. merged),
			 * we don't need to log this
			 */
			if (ciil->count) {
				log_bnode_del(sb, bufindex(buf), ciil->start,
					      ciil->count);
			}
			memset(ciil, 0, sizeof(*ciil));

			/* Try to merge node with prev */
			if (prev[level]) {
				assert(level);
				if (try_bnode_merge(sb, prev[level], buf)) {
					trace(">>> can merge node %p into node %p", buf, prev[level]);
					remove_index(cursor, cii);
					mark_buffer_unify_non(prev[level]);
					blockput_free_unify(sb, buf);
					goto keep_prev_node;
				}
				blockput(prev[level]);
			}
			prev[level] = buf;

keep_prev_node:
			if (!level)
				goto chop_root;
		}

		/* Push back down to leaf level */
		do {
			ret = cursor_advance_down(cursor);
			if (ret < 0)
				goto out;
		} while (ret);
	}

chop_root:
	/* Remove depth if possible */
	while (btree->root.depth > 1 && bcount(bufdata(prev[0])) == 1) {
		trace("drop btree level");
		btree->root.block = bufindex(prev[1]);
		btree->root.depth--;
		tux3_mark_btree_dirty(btree);

		/*
		 * We know prev[0] is redirected and dirty.  So, in
		 * here, we can just cancel bnode_redirect by bfree(),
		 * instead of defered_bfree().
		 * FIXME: we can optimize freeing bnode without
		 * bnode_redirect, and if we did, this is not true.
		 */
		bfree(sb, bufindex(prev[0]), 1);
		log_bnode_free(sb, bufindex(prev[0]));
		blockput_free_unify(sb, prev[0]);

		vecmove(prev, prev + 1, btree->root.depth);
	}

	ret = 0;

out:
	if (leafprev)
		blockput(leafprev);
	for (int i = 0; i < btree->root.depth; i++) {
		if (prev[i])
			blockput(prev[i]);
	}
	release_cursor(cursor);
error_btree_probe:
	up_write(&btree->lock);

	free_cursor(cursor);
error_alloc_cursor:
	free(cii);
error_cii:
	free(prev);

	return ret;
}
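/*
 * Usage sketch (not from the original source): btree_chop() treats any length
 * of TUXKEY_LIMIT or more as "chop everything from start onward", so a
 * truncate-style caller that discards the tail of the keyspace could be as
 * simple as this hypothetical wrapper.
 */
static int example_chop_tail(struct btree *btree, tuxkey_t start)
{
	return btree_chop(btree, start, TUXKEY_LIMIT);
}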
int tree_chop(struct btree *btree, struct delete_info *info, millisecond_t deadline) { int depth = btree->root.depth, level = depth - 1, suspend = 0; struct cursor *cursor; struct buffer_head *leafbuf, **prev, *leafprev = NULL; struct btree_ops *ops = btree->ops; struct sb *sb = btree->sb; int ret; cursor = alloc_cursor(btree, 0); prev = malloc(sizeof(*prev) * depth); memset(prev, 0, sizeof(*prev) * depth); down_write(&btree->lock); probe(btree, info->resume, cursor); leafbuf = level_pop(cursor); /* leaf walk */ while (1) { ret = (ops->leaf_chop)(btree, info->key, bufdata(leafbuf)); if (ret) { mark_buffer_dirty(leafbuf); if (ret < 0) goto error_leaf_chop; } /* try to merge this leaf with prev */ if (leafprev) { struct vleaf *this = bufdata(leafbuf); struct vleaf *that = bufdata(leafprev); /* try to merge leaf with prev */ if ((ops->leaf_need)(btree, this) <= (ops->leaf_free)(btree, that)) { trace(">>> can merge leaf %p into leaf %p", leafbuf, leafprev); (ops->leaf_merge)(btree, that, this); remove_index(cursor, level); mark_buffer_dirty(leafprev); brelse_free(btree, leafbuf); //dirty_buffer_count_check(sb); goto keep_prev_leaf; } brelse(leafprev); } leafprev = leafbuf; keep_prev_leaf: //nanosleep(&(struct timespec){ 0, 50 * 1000000 }, NULL); //printf("time remaining: %Lx\n", deadline - gettime()); // if (deadline && gettime() > deadline) // suspend = -1; if (info->blocks && info->freed >= info->blocks) suspend = -1; /* pop and try to merge finished nodes */ while (suspend || level_finished(cursor, level)) { /* try to merge node with prev */ if (prev[level]) { assert(level); /* node has no prev */ struct bnode *this = cursor_node(cursor, level); struct bnode *that = bufdata(prev[level]); trace_off("check node %p against %p", this, that); trace_off("this count = %i prev count = %i", bcount(this), bcount(that)); /* try to merge with node to left */ if (bcount(this) <= sb->entries_per_node - bcount(that)) { trace(">>> can merge node %p into node %p", this, that); merge_nodes(that, this); remove_index(cursor, level - 1); mark_buffer_dirty(prev[level]); brelse_free(btree, level_pop(cursor)); //dirty_buffer_count_check(sb); goto keep_prev_node; } brelse(prev[level]); } prev[level] = level_pop(cursor); keep_prev_node: /* deepest key in the cursor is the resume address */ if (suspend == -1 && !level_finished(cursor, level)) { suspend = 1; /* only set resume once */ info->resume = from_be_u64((cursor->path[level].next)->key); } if (!level) { /* remove depth if possible */ while (depth > 1 && bcount(bufdata(prev[0])) == 1) { trace("drop btree level"); btree->root.block = bufindex(prev[1]); mark_btree_dirty(btree); brelse_free(btree, prev[0]); //dirty_buffer_count_check(sb); depth = --btree->root.depth; vecmove(prev, prev + 1, depth); //set_sb_dirty(sb); } //sb->snapmask &= ~snapmask; delete_snapshot_from_disk(); //set_sb_dirty(sb); //save_sb(sb); ret = suspend; goto out; } level--; trace_off(printf("pop to level %i, block %Lx, %i of %i nodes\n", level, bufindex(cursor->path[level].buffer), cursor->path[level].next - cursor_node(cursor, level)->entries, bcount(cursor_node(cursor, level)));); } /* push back down to leaf level */ while (level < depth - 1) { struct buffer_head *buffer = sb_bread(vfs_sb(sb), from_be_u64(cursor->path[level++].next++->block)); if (!buffer) { ret = -EIO; goto out; } level_push(cursor, buffer, ((struct bnode *)bufdata(buffer))->entries); trace_off(printf("push to level %i, block %Lx, %i nodes\n", level, bufindex(buffer), bcount(cursor_node(cursor, level)));); }
/* There is no next entry? */
static inline int level_finished(struct cursor *cursor, int level)
{
	struct bnode *node = level_node(cursor, level);
	return cursor->path[level].next == node->entries + bcount(node);
}
void evali(const char *str, char *err, int *eval) { // Mathematical Expression Evaluation Function // ----------------------------------------------- // This functions solves mathematical equations. // When a complex equation is passed via *str, // the equation is broken into parts, and the // simplest part is passed on recursively onto // the function itself. This recurvise process is // repeated until the whole equation has been solved. // Results of the evaluation are stored in int eval. if(!bcheck(str)) { /* this is the core of eval where the calculations are done. at this level, the equation does not have any brackets. */ const char symbols[]="^*/%+-&"; const char se[]="Invalid Syntax"; char *tmp = NULL; if((tmp=(char *)(malloc(sizeof(char)* (strlen(str)+1))))==NULL) allocerr(); *tmp='\0'; /* check wether str has reached the absolute stage */ if(prechar(tmp,str,symbols)==0) { printf("\n[simple]"); if(!ncheck(str)) { printf("\nequation solved!"); *eval = strint(str); free(tmp); tmp = NULL; return; } else if(!id_check(str)) { printf("\nit's a variable!"); free(tmp); tmp = NULL; return; } else { strcpy(err,se); stradd(err,": "); stradd(err,str); free(tmp); tmp = NULL; return; } } else /* there are symbols in str */ { free(tmp); tmp = NULL; } /* now the real maths */ printf("\n[complex]"); char *pre = NULL; /* string preceding of operator */ char *pos = NULL; /* string succeding the operator */ /* now allocate the variables */ if((pre=(char *)(malloc(sizeof(char)* (strlen(str)+1))))==NULL) allocerr(); *pre='\0'; if((pos=(char *)(malloc(sizeof(char)* (strlen(str)+1))))==NULL) allocerr(); *pos='\0'; char symbol = 0; if(prechar(pre,str,"^")) { if(postchar(pos,str,"^")) symbol = '^'; } else if(prechar(pre,str,"*")) { if(postchar(pos,str,"*")) symbol = '*'; } else if(prechar(pre,str,"/")) { if(postchar(pos,str,"/")) symbol = '/'; } else if(prechar(pre,str,"%")) { if(postchar(pos,str,"%")) symbol = '%'; } else if(prechar(pre,str,"+")) { if(postchar(pos,str,"+")) symbol = '+'; } else if(prechar(pre,str,"-")) { if(postchar(pos,str,"-")) symbol = '-'; } else if(prechar(pre,str,"&")) { if(postchar(pos,str,"&")) symbol = '&'; } char *ax = NULL; /* value preceding of operator */ char *bx = NULL; /* value succeding the operator */ char *cx = NULL; /* value of ax and bx processed */ /* now allocate the variables */ if((ax=(char *)(malloc(sizeof(char)* (strlen(pre)+1))))==NULL) allocerr(); *ax='\0'; if((bx=(char *)(malloc(sizeof(char)* (strlen(pos)+1))))==NULL) allocerr(); *bx='\0'; if((cx=(char *)(malloc(sizeof(char)* (strlen(str)+1))))==NULL) allocerr(); *cx='\0'; /* find out the contents of bx */ char *ebx = NULL; /* temp string to build bx */ if((ebx=(char *)(malloc(sizeof(char)* (strlen(pos)+1))))==NULL) allocerr(); *ebx='\0'; strcpy(bx,pos); strcpy(ebx,bx); for(;;) /* infinite loop */ { if(!prechar(bx,ebx,symbols)) { strcpy(bx,ebx); free(ebx); ebx = NULL; /* de-allocate ebx */ break; } else /* here ebx is build */ strcpy(ebx,bx); } /* find out the contents of ax */ char *eax = NULL; /* temp string to build ax */ if((eax=(char *)(malloc(sizeof(char)* (strlen(pre)+1))))==NULL) allocerr(); *eax='\0'; strcpy(ax,pre); strcpy(eax,ax); for(;;) /* infinite loop */ { if(!postchar(ax,eax,symbols)) { strcpy(ax,eax); free(eax); eax = NULL; /* de-allocate eax */ break; } else /* here eax is build */ strcpy(eax,ax); } /* variables to store (pre-ax) and (pre-bx) */ char *prex = NULL; /* string of (pre-ax) */ char *posx = NULL; /* string of (pos-ax) */ /* now allocate prex and posx */ if((prex=(char 
*)(malloc(sizeof(char)* (strlen(pre)+1))))==NULL) allocerr(); *prex='\0'; if((posx=(char *)(malloc(sizeof(char)* (strlen(pos)+1))))==NULL) allocerr(); *posx='\0'; /* find prex and posx */ strlft(prex,pre,(strlen(pre)-strlen(ax))); strrht(posx,pos,(strlen(pos)-strlen(bx))); /* de-allocate pre & pos */ printf("\nsym: %c",symbol); printf("\npre: %s",pre); printf("\npos: %s",pos); free(pre); pre = NULL; free(pos); pos = NULL; /* process ax and bx to find cx */ // ***************** /* de-allocate ax & bx */ printf("\n*ax: %s",ax); printf("\n*bx: %s",bx); printf("\n*cx: %s",cx); printf("\nprx: %s",prex); printf("\npox: %s",posx); free(ax); ax = NULL; free(bx); bx = NULL; /* variable to store one-step solved equation */ char *ex = NULL; if((ex=(char *)(malloc(sizeof(char)* (strlen(str)+1))))==NULL) allocerr(); *ex='\0'; /* find ex using cx in prex and posx */ // ***************** /* now de-allocate cx, prex & posx */ free(cx); cx = NULL; free(prex); cx = NULL; free(posx); cx = NULL; /* recurse ex on eval for next-step solving */ // ***************** /* de-allocate ex & return */ free(ex); ex = NULL; return; } else { if(!bcount(str)) { printf("\nEquation has brackets."); return; } else { strcpy(err,"Illegal Equation, inequal number of brackets."); return; } } }
/* * Insert new leaf to next cursor position. * keep == 1: keep current cursor position. * keep == 0, set cursor position to new leaf. */ static int insert_leaf(struct cursor *cursor, tuxkey_t childkey, struct buffer_head *leafbuf, int keep) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct btree *btree = cursor->btree; struct sb *sb = btree->sb; int level = btree->root.depth; block_t childblock = bufindex(leafbuf); if (keep) blockput(leafbuf); else { cursor_pop_blockput(cursor); cursor_push(cursor, leafbuf, NULL); } while (level--) { struct path_level *at = &cursor->path[level]; struct buffer_head *parentbuf = at->buffer; struct bnode *parent = bufdata(parentbuf); /* insert and exit if not full */ if (bcount(parent) < btree->sb->entries_per_node) { bnode_add_index(parent, at->next, childblock, childkey); if (!keep) at->next++; log_bnode_add(sb, bufindex(parentbuf), childblock, childkey); mark_buffer_unify_non(parentbuf); cursor_check(cursor); return 0; } /* split a full index node */ struct buffer_head *newbuf = new_node(btree); if (IS_ERR(newbuf)) return PTR_ERR(newbuf); struct bnode *newnode = bufdata(newbuf); unsigned half = bcount(parent) / 2; u64 newkey = be64_to_cpu(parent->entries[half].key); bnode_split(parent, half, newnode); log_bnode_split(sb, bufindex(parentbuf), half, bufindex(newbuf)); /* if the cursor is in the new node, use that as the parent */ int child_is_left = at->next <= parent->entries + half; if (!child_is_left) { struct index_entry *newnext; mark_buffer_unify_non(parentbuf); newnext = newnode->entries + (at->next - &parent->entries[half]); get_bh(newbuf); level_replace_blockput(cursor, level, newbuf, newnext); parentbuf = newbuf; parent = newnode; } else mark_buffer_unify_non(newbuf); bnode_add_index(parent, at->next, childblock, childkey); if (!keep) at->next++; log_bnode_add(sb, bufindex(parentbuf), childblock, childkey); mark_buffer_unify_non(parentbuf); childkey = newkey; childblock = bufindex(newbuf); blockput(newbuf); /* * If child is in left bnode, we should keep the * cursor position to child, otherwise adjust cursor * to new bnode. */ keep = child_is_left; } /* Make new root bnode */ trace("add tree level"); struct buffer_head *newbuf = new_node(btree); if (IS_ERR(newbuf)) return PTR_ERR(newbuf); struct bnode *newroot = bufdata(newbuf); block_t newrootblock = bufindex(newbuf); block_t oldrootblock = btree->root.block; int left_node = bufindex(cursor->path[0].buffer) != childblock; bnode_init_root(newroot, 2, oldrootblock, childblock, childkey); cursor_root_add(cursor, newbuf, newroot->entries + 1 + !left_node); log_bnode_root(sb, newrootblock, 2, oldrootblock, childblock, childkey); /* Change btree to point the new root */ btree->root.block = newrootblock; btree->root.depth++; mark_buffer_unify_non(newbuf); tux3_mark_btree_dirty(btree); cursor_check(cursor); return 0; }
int alloc_empty_btree(struct btree *btree) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct sb *sb = btree->sb; struct buffer_head *rootbuf = new_node(btree); if (IS_ERR(rootbuf)) goto error; struct buffer_head *leafbuf = new_leaf(btree); if (IS_ERR(leafbuf)) goto error_leafbuf; assert(!has_root(btree)); struct bnode *rootnode = bufdata(rootbuf); block_t rootblock = bufindex(rootbuf); block_t leafblock = bufindex(leafbuf); trace("root at %Lx", rootblock); trace("leaf at %Lx", leafblock); bnode_init_root(rootnode, 1, leafblock, 0, 0); log_bnode_root(sb, rootblock, 1, leafblock, 0, 0); log_balloc(sb, leafblock, 1); mark_buffer_unify_non(rootbuf); blockput(rootbuf); mark_buffer_dirty_non(leafbuf); blockput(leafbuf); btree->root = (struct root){ .block = rootblock, .depth = 1 }; tux3_mark_btree_dirty(btree); return 0; error_leafbuf: (btree->ops->bfree)(sb, bufindex(rootbuf), 1); blockput(rootbuf); rootbuf = leafbuf; error: return PTR_ERR(rootbuf); } /* FIXME: right? and this should be done by btree_chop()? */ int free_empty_btree(struct btree *btree) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct btree_ops *ops = btree->ops; if (!has_root(btree)) return 0; assert(btree->root.depth == 1); struct sb *sb = btree->sb; struct buffer_head *rootbuf = vol_bread(sb, btree->root.block); if (!rootbuf) return -EIO; assert(bnode_sniff(bufdata(rootbuf))); /* Make btree has no root */ btree->root = no_root; tux3_mark_btree_dirty(btree); struct bnode *rootnode = bufdata(rootbuf); assert(bcount(rootnode) == 1); block_t leaf = be64_to_cpu(rootnode->entries[0].block); struct buffer_head *leafbuf = vol_find_get_block(sb, leaf); if (leafbuf && !leaf_need_redirect(sb, leafbuf)) { /* * This is redirected leaf. So, in here, we can just * cancel leaf_redirect by bfree(), instead of * defered_bfree(). */ bfree(sb, leaf, 1); log_leaf_free(sb, leaf); assert(ops->leaf_can_free(btree, bufdata(leafbuf))); blockput_free(sb, leafbuf); } else { defer_bfree(&sb->defree, leaf, 1); log_bfree(sb, leaf, 1); if (leafbuf) { assert(ops->leaf_can_free(btree, bufdata(leafbuf))); blockput(leafbuf); } } if (!bnode_need_redirect(sb, rootbuf)) { /* * This is redirected bnode. So, in here, we can just * cancel bnode_redirect by bfree(), instead of * defered_bfree(). 
*/ bfree(sb, bufindex(rootbuf), 1); log_bnode_free(sb, bufindex(rootbuf)); blockput_free_unify(sb, rootbuf); } else { defer_bfree(&sb->deunify, bufindex(rootbuf), 1); log_bfree_on_unify(sb, bufindex(rootbuf), 1); blockput(rootbuf); } return 0; } int replay_bnode_redirect(struct replay *rp, block_t oldblock, block_t newblock) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct sb *sb = rp->sb; struct buffer_head *newbuf, *oldbuf; int err = 0; newbuf = vol_getblk(sb, newblock); if (!newbuf) { err = -ENOMEM; /* FIXME: error code */ goto error; } oldbuf = vol_bread(sb, oldblock); if (!oldbuf) { err = -EIO; /* FIXME: error code */ goto error_put_newbuf; } assert(bnode_sniff(bufdata(oldbuf))); memcpy(bufdata(newbuf), bufdata(oldbuf), bufsize(newbuf)); mark_buffer_unify_atomic(newbuf); blockput(oldbuf); error_put_newbuf: blockput(newbuf); error: return err; } int replay_bnode_root(struct replay *rp, block_t root, unsigned count, block_t left, block_t right, tuxkey_t rkey) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct sb *sb = rp->sb; struct buffer_head *rootbuf; rootbuf = vol_getblk(sb, root); if (!rootbuf) return -ENOMEM; bnode_buffer_init(rootbuf); bnode_init_root(bufdata(rootbuf), count, left, right, rkey); mark_buffer_unify_atomic(rootbuf); blockput(rootbuf); return 0; } /* * Before this replay, replay should already dirty the buffer of src. * (e.g. by redirect) */ int replay_bnode_split(struct replay *rp, block_t src, unsigned pos, block_t dst) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct sb *sb = rp->sb; struct buffer_head *srcbuf, *dstbuf; int err = 0; srcbuf = vol_getblk(sb, src); if (!srcbuf) { err = -ENOMEM; /* FIXME: error code */ goto error; } dstbuf = vol_getblk(sb, dst); if (!dstbuf) { err = -ENOMEM; /* FIXME: error code */ goto error_put_srcbuf; } bnode_buffer_init(dstbuf); bnode_split(bufdata(srcbuf), pos, bufdata(dstbuf)); mark_buffer_unify_non(srcbuf); mark_buffer_unify_atomic(dstbuf); blockput(dstbuf); error_put_srcbuf: blockput(srcbuf); error: return err; } /* * Before this replay, replay should already dirty the buffer of bnodeblock. * (e.g. 
by redirect) */ static int replay_bnode_change(struct sb *sb, block_t bnodeblock, u64 val1, u64 val2, void (*change)(struct bnode *, u64, u64)) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct buffer_head *bnodebuf; bnodebuf = vol_getblk(sb, bnodeblock); if (!bnodebuf) return -ENOMEM; /* FIXME: error code */ struct bnode *bnode = bufdata(bnodebuf); change(bnode, val1, val2); mark_buffer_unify_non(bnodebuf); blockput(bnodebuf); return 0; } static void add_func(struct bnode *bnode, u64 child, u64 key) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct index_entry *entry = bnode_lookup(bnode, key) + 1; bnode_add_index(bnode, entry, child, key); } int replay_bnode_add(struct replay *rp, block_t parent, block_t child, tuxkey_t key) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } return replay_bnode_change(rp->sb, parent, child, key, add_func); } static void update_func(struct bnode *bnode, u64 child, u64 key) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct index_entry *entry = bnode_lookup(bnode, key); assert(be64_to_cpu(entry->key) == key); entry->block = cpu_to_be64(child); } int replay_bnode_update(struct replay *rp, block_t parent, block_t child, tuxkey_t key) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } return replay_bnode_change(rp->sb, parent, child, key, update_func); } int replay_bnode_merge(struct replay *rp, block_t src, block_t dst) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct sb *sb = rp->sb; struct buffer_head *srcbuf, *dstbuf; int err = 0, ret; srcbuf = vol_getblk(sb, src); if (!srcbuf) { err = -ENOMEM; /* FIXME: error code */ goto error; } dstbuf = vol_getblk(sb, dst); if (!dstbuf) { err = -ENOMEM; /* FIXME: error code */ goto error_put_srcbuf; } ret = bnode_merge_nodes(sb, bufdata(dstbuf), bufdata(srcbuf)); assert(ret == 1); mark_buffer_unify_non(dstbuf); mark_buffer_unify_non(srcbuf); blockput(dstbuf); error_put_srcbuf: blockput(srcbuf); error: return err; } static void del_func(struct bnode *bnode, u64 key, u64 count) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct index_entry *entry = bnode_lookup(bnode, key); assert(be64_to_cpu(entry->key) == key); bnode_remove_index(bnode, entry, count); } int replay_bnode_del(struct replay *rp, block_t bnode, tuxkey_t key, unsigned count) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } return replay_bnode_change(rp->sb, bnode, key, count, del_func); } static void adjust_func(struct bnode *bnode, u64 from, u64 to) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } struct index_entry *entry = bnode_lookup(bnode, from); assert(be64_to_cpu(entry->key) == from); entry->key = cpu_to_be64(to); } int replay_bnode_adjust(struct replay *rp, block_t bnode, tuxkey_t from, tuxkey_t to) { if(DEBUG_MODE_K==1) { printf("\t\t\t\t%25s[K] %25s %4d #in\n",__FILE__,__func__,__LINE__); } return replay_bnode_change(rp->sb, bnode, from, to, adjust_func); }