/* * EFFECT: * - do flush in a background thread * PROCESS: * - if buf is NULL, we will do _flush_some_child * - if buf is NOT NULL, we will do _flush_buffer_to_child * ENTER: * - fe->node is already locked * EXIT: * - nodes are unlocked */ static void _flush_node_func(void *fe) { enum reactivity re; struct flusher_extra *extra = (struct flusher_extra*)fe; struct tree *t = extra->tree; struct node *n = extra->node; struct nmb *buf = extra->buffer; node_set_dirty(n); if (buf) { _flush_buffer_to_child(t, n, buf); nmb_free(buf); /* check the child node */ re = get_reactivity(t, n); if (re == FLUSHBLE) _flush_some_child(t, n); else cache_unpin(t->cf, n->cpair, make_cpair_attr(n)); } else { /* we want flush some buffer from n */ _flush_some_child(t, n); } xfree(extra); }
/*
 * Release a node and everything it owns.
 * A leaf holds a single leaf message buffer; a nonleaf with nc children
 * owns nc message buffers, nc partitions and nc - 1 pivot keys.
 */
void node_free(struct node *node)
{
	nassert(node != NULL);

	if (node->height == 0) {
		/* leaf: only the leaf buffer to drop */
		lmb_free(node->u.l.buffer);
	} else {
		uint32_t nc = node->u.n.n_children;

		if (nc > 0) {
			uint32_t idx;

			/* nc children are separated by nc - 1 pivots */
			for (idx = 0; idx + 1 < nc; idx++)
				xfree(node->u.n.pivots[idx].data);
			for (idx = 0; idx < nc; idx++)
				nmb_free(node->u.n.parts[idx].buffer);
			xfree(node->u.n.pivots);
			xfree(node->u.n.parts);
		}
	}
	xfree(node);
}
/* Dispose of one nonleaf child descriptor together with its message buffer. */
static void free_nonleaf(struct nonleaf_childinfo *info)
{
	nmb_free(info->buffer);
	xfree(info);
}
void _flush_buffer_to_child(struct tree *t, struct node *child, struct nmb *buf) { struct mb_iter iter; mb_iter_init(&iter, buf->pma); while (mb_iter_next(&iter)) { /* TODO(BohuTANG): check msn */ struct nmb_values nvalues; nmb_get_values(&iter, &nvalues); struct bt_cmd cmd = { .msn = nvalues.msn, .type = nvalues.type, .key = &nvalues.key, .val = &nvalues.val, .xidpair = nvalues.xidpair }; node_put_cmd(t, child, &cmd); } } void _flush_some_child(struct tree *t, struct node *parent); /* * PROCESS: * - check child reactivity * - if FISSIBLE: split child * - if FLUSHBLE: flush buffer from child * ENTER: * - parent is already locked * - child is already locked * EXIT: * - parent is unlocked * - no nodes are locked */ void _child_maybe_reactivity(struct tree *t, struct node *parent, struct node *child) { enum reactivity re = get_reactivity(t, child); switch (re) { case STABLE: cache_unpin(t->cf, child->cpair, make_cpair_attr(child)); cache_unpin(t->cf, parent->cpair, make_cpair_attr(parent)); break; case FISSIBLE: node_split_child(t, parent, child); cache_unpin(t->cf, child->cpair, make_cpair_attr(child)); cache_unpin(t->cf, parent->cpair, make_cpair_attr(parent)); break; case FLUSHBLE: cache_unpin(t->cf, parent->cpair, make_cpair_attr(parent)); _flush_some_child(t, child); break; } } /* * PROCESS: * - pick a heaviest child of parent * - flush from parent to child * - maybe split/flush child recursively * ENTER: * - parent is already locked * EXIT: * - parent is unlocked * - no nodes are locked */ void _flush_some_child(struct tree *t, struct node *parent) { int childnum; enum reactivity re; struct node *child; struct partition *part; struct nmb *buffer; struct timespec t1, t2; childnum = node_find_heaviest_idx(parent); nassert(childnum < parent->n_children); part = &parent->parts[childnum]; buffer = part->ptr.u.nonleaf->buffer; if (cache_get_and_pin(t->cf, part->child_nid, (void**)&child, L_WRITE) != NESS_OK) { __ERROR("cache get node error, nid [%" PRIu64 "]", 
part->child_nid); return; } ngettime(&t1); re = get_reactivity(t, child); if (re == STABLE) { node_set_dirty(parent); part->ptr.u.nonleaf->buffer = nmb_new(t->e); _flush_buffer_to_child(t, child, buffer); nmb_free(buffer); } ngettime(&t2); status_add(&t->e->status->tree_flush_child_costs, time_diff_ms(t1, t2)); status_increment(&t->e->status->tree_flush_child_nums); _child_maybe_reactivity(t, parent, child); }