/*
 * Reclaim "loose" hammer_io structures queued on hmp->lose_root.
 * Each entry is detached from the tree, given a temporary reference,
 * and then released via hammer_rel_buffer(), which may free it.
 */
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			/* every loose io must still claim lose_root */
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			/* take a ref so the release below is balanced */
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
/**
 * Restore the red-black properties of a red-black tree after the
 * splicing out of a node
 *
 * This function has three parameters: the tree to be fixed up, the
 * node where the trouble occurs, and the order. _rb_fixup() is an
 * implementation of the routine RB-DELETE-FIXUP on p. 274 of Cormen
 * et al. (p. 326 in the paperback version of the 2009 edition).
 */
HIDDEN void
_rb_fixup(struct bu_rb_tree *tree, struct bu_rb_node *node, int order)
{
    int direction;
    struct bu_rb_node *parent;
    struct bu_rb_node *w;	/* node's sibling */

    BU_CKMAG(tree, BU_RB_TREE_MAGIC, "red-black tree");
    BU_CKMAG(node, BU_RB_NODE_MAGIC, "red-black node");
    RB_CKORDER(tree, order);

    /* Loop until node is the root or carries an extra black. */
    while ((node != RB_ROOT(tree, order))
	   && (RB_GET_COLOR(node, order) == RB_BLK))
    {
	parent = RB_PARENT(node, order);
	if (node == RB_LEFT_CHILD(parent, order))
	    direction = RB_LEFT;
	else
	    direction = RB_RIGHT;

	w = RB_OTHER_CHILD(parent, order, direction);
	/* Case 1: sibling is red -- rotate to make it black. */
	if (RB_GET_COLOR(w, order) == RB_RED) {
	    RB_SET_COLOR(w, order, RB_BLK);
	    RB_SET_COLOR(parent, order, RB_RED);
	    RB_ROTATE(parent, order, direction);
	    w = RB_OTHER_CHILD(parent, order, direction);
	}

	/* Case 2: both of the sibling's children are black. */
	if ((RB_GET_COLOR(RB_CHILD(w, order, direction), order) == RB_BLK)
	    && (RB_GET_COLOR(RB_OTHER_CHILD(w, order, direction), order) == RB_BLK))
	{
	    RB_SET_COLOR(w, order, RB_RED);
	    node = parent;	/* push the extra black up the tree */
	} else {
	    /* Case 3: sibling's far child is black -- rotate at w. */
	    if (RB_GET_COLOR(RB_OTHER_CHILD(w, order, direction), order) == RB_BLK) {
		RB_SET_COLOR(RB_CHILD(w, order, direction), order, RB_BLK);
		RB_SET_COLOR(w, order, RB_RED);
		RB_OTHER_ROTATE(w, order, direction);
		w = RB_OTHER_CHILD(parent, order, direction);
	    }
	    /* Case 4: recolor and rotate at the parent; done. */
	    RB_SET_COLOR(w, order, RB_GET_COLOR(parent, order));
	    RB_SET_COLOR(parent, order, RB_BLK);
	    RB_SET_COLOR(RB_OTHER_CHILD(w, order, direction), order, RB_BLK);
	    RB_ROTATE(parent, order, direction);
	    node = RB_ROOT(tree, order);	/* terminates the loop */
	}
    }
    RB_SET_COLOR(node, order, RB_BLK);
}
/*
 * Find the buffer object whose address-space node covers the page range
 * [page_start, page_start + num_pages).  Returns NULL if no object
 * starts at or before page_start, or if the best match does not span
 * the whole requested range.
 */
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct ttm_buffer_object *candidate = NULL;
	struct ttm_buffer_object *cur;

	/* Descend the tree keyed on vm_node->start, remembering the
	 * last node that starts at or below page_start. */
	for (cur = RB_ROOT(&bdev->addr_space_rb); cur != NULL; ) {
		unsigned long start = cur->vm_node->start;

		if (page_start < start) {
			cur = RB_LEFT(cur, vm_rb);
			continue;
		}
		candidate = cur;
		if (page_start == start)
			break;
		cur = RB_RIGHT(cur, vm_rb);
	}

	if (unlikely(candidate == NULL))
		return NULL;

	/* Reject a candidate that ends before the requested range does. */
	if (unlikely((candidate->vm_node->start + candidate->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return candidate;
}
/*
 * The algorithm is from
 * I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
 * First: A Flexible and Accurate Mechanism for Proportional Share
 * Resource Allocation,'' technical report.
 *
 * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 *
 * - Partition the tree into two parts by ve:
 *   - One part contains nodes with ve smaller than vtime
 *   - The other part contains nodes with ve larger than vtime
 * - In the first part, find the node with minimum vd, along the
 *   min_vd value path
 *
 * Returns
 *   NULL, if no node with ve smaller than vtime
 *   or the elegible node with minimum vd.
 */
static struct bfq_thread_io *
wf2q_augtree_get_eligible_with_min_vd(struct wf2q_augtree_t *tree, int vtime)
{
	struct bfq_thread_io *node = RB_ROOT(tree), *st_tree = NULL, *path_req = NULL;

	/*
	 * Phase 1: walk down from the root.  Whenever a node is
	 * eligible (ve <= vtime), its entire left subtree is eligible
	 * too (tree is keyed on ve), so record both the best node seen
	 * on the path and the best left subtree by its min_vd augment.
	 */
	while (node) {
		if (node->ve <= vtime) {
			/* update node with earliest deadline along path. */
			if ((!path_req) || (path_req->vd > node->vd))
				path_req = node;
			/* update root of subtree containing earliest deadline */
			if ((!st_tree) || (RB_LEFT(node,entry) &&
			    st_tree->min_vd > RB_LEFT(node,entry)->min_vd))
				st_tree = RB_LEFT(node,entry);
			node = RB_RIGHT(node, entry);
		} else
			node = RB_LEFT(node, entry);
	}
	/*
	 * check whether node with earliest deadline was along path
	 * (path_req is non-NULL whenever st_tree is, since both are
	 * only set under node->ve <= vtime)
	 */
	if ((!st_tree) || (st_tree->min_vd >= path_req->vd))
		return path_req;
	/*
	 * Phase 2: return node with earliest deadline from subtree,
	 * following the min_vd augment values down to the node that
	 * actually carries the minimum vd.
	 */
	for (node = st_tree; node; ) {
		/* if node found, return it */
		if (st_tree->min_vd == node->vd)
			return node;
		/* XXX: modified temporarily */
		if (RB_LEFT(node, entry) && node->min_vd == RB_LEFT(node, entry)->min_vd)
			node = RB_LEFT(node, entry);
		else
			node = RB_RIGHT(node, entry);
	}
	return NULL;
}
int cmd_unbind_key_table(struct cmd *self, struct cmd_ctx *ctx, int key) { struct args *args = self->args; const char *tablename; const struct mode_key_table *mtab; struct mode_key_binding *mbind, mtmp; tablename = args_get(args, 't'); if ((mtab = mode_key_findtable(tablename)) == NULL) { ctx->error(ctx, "unknown key table: %s", tablename); return (-1); } if (key == KEYC_NONE) { while (!RB_EMPTY(mtab->tree)) { mbind = RB_ROOT(mtab->tree); RB_REMOVE(mode_key_tree, mtab->tree, mbind); xfree(mbind); } return (0); } mtmp.key = key; mtmp.mode = !!args_has(args, 'c'); if ((mbind = RB_FIND(mode_key_tree, mtab->tree, &mtmp)) != NULL) { RB_REMOVE(mode_key_tree, mtab->tree, mbind); xfree(mbind); } return (0); }
/*
 * Remove bindings from a named mode-key table.  KEYC_UNKNOWN empties
 * the whole table; otherwise only the binding matching key (and the
 * -c mode flag) is removed.
 */
enum cmd_retval
cmd_unbind_key_mode_table(struct cmd *self, struct cmd_q *cmdq, key_code key)
{
	struct args			*args = self->args;
	const char			*tablename;
	const struct mode_key_table	*mtab;
	struct mode_key_binding		*mbind, mtmp;

	tablename = args_get(args, 't');
	mtab = mode_key_findtable(tablename);
	if (mtab == NULL) {
		cmdq_error(cmdq, "unknown key table: %s", tablename);
		return (CMD_RETURN_ERROR);
	}

	if (key != KEYC_UNKNOWN) {
		/* Remove the one binding matching key and mode, if any. */
		mtmp.key = key;
		mtmp.mode = !!args_has(args, 'c');
		mbind = RB_FIND(mode_key_tree, mtab->tree, &mtmp);
		if (mbind != NULL) {
			RB_REMOVE(mode_key_tree, mtab->tree, mbind);
			free(mbind);
		}
		return (CMD_RETURN_NORMAL);
	}

	/* No key given: drain the entire table. */
	while ((mbind = RB_ROOT(mtab->tree)) != NULL) {
		RB_REMOVE(mode_key_tree, mtab->tree, mbind);
		free(mbind);
	}
	return (CMD_RETURN_NORMAL);
}
/*
 * Unbind one key binding, or with -a every binding.  With -t the
 * request is forwarded to the mode-key table handler.  Returns 0 on
 * success, -1 on error.
 */
int
cmd_unbind_key_exec(struct cmd *self, unused struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct key_binding	*bd;
	int			 key;

	if (args_has(args, 'a'))
		key = KEYC_NONE;
	else {
		key = key_string_lookup_string(args->argv[0]);
		if (key == KEYC_NONE) {
			ctx->error(ctx, "unknown key: %s", args->argv[0]);
			return (-1);
		}
	}

	if (args_has(args, 't'))
		return (cmd_unbind_key_table(self, ctx, key));

	if (key == KEYC_NONE) {
		/* -a: remove every binding in the tree. */
		while ((bd = RB_ROOT(&key_bindings)) != NULL)
			key_bindings_remove(bd->key);
		return (0);
	}

	if (!args_has(args, 'n'))
		key |= KEYC_PREFIX;
	key_bindings_remove(key);
	return (0);
}
/* * return boolean whether or not the last ctfile_list contained * filename. */ int ct_file_on_server(struct ct_global_state *state, char *filename) { struct ctfile_list_tree results; struct ctfile_list_file *file = NULL; char *filelist[2]; int exists = 0; RB_INIT(&results); filelist[0] = filename; filelist[1] = NULL; ctfile_list_complete(&state->ctfile_list_files, CT_MATCH_GLOB, filelist, NULL, &results); /* Check to see if we already have a secrets file on the server */ if (RB_MIN(ctfile_list_tree, &results) != NULL) { exists = 1; } while ((file = RB_ROOT(&results)) != NULL) { RB_REMOVE(ctfile_list_tree, &results, file); e_free(&file); } return (exists); }
/*
 * Look up the map entry containing address via the map's red-black
 * store.  On success *vm_entry is the containing entry and TRUE is
 * returned.  On failure *vm_entry is the closest preceding entry (or
 * the map header when there is none) and FALSE is returned.
 */
boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	/* NOTE(review): copies the whole vm_map_header by value and
	 * walks the copy's rb_head_store -- presumably intentional,
	 * but confirm this is safe/cheap for this header type. */
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if(cur == VM_MAP_ENTRY_NULL)
			panic("no entry");
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				/* address falls inside this entry */
				*vm_entry = cur;
				return TRUE;
			}
			/* entry ends before address; remember it as the
			 * best predecessor and search to the right */
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if( prev == VM_MAP_ENTRY_NULL){
		/* nothing precedes address: hand back the map header */
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}
/* recursively free a vertree_entry. caller frees entry itself */ static void ct_version_tree_free_entry(struct ct_vertree_entry *entry) { struct ct_vertree_ver *ventry; struct ct_vertree_entry *child; if (entry == NULL) { return; } if (entry->cve_name != NULL) e_free(&entry->cve_name); /* Clean up version entries */ while ((ventry = TAILQ_FIRST(&entry->cve_versions)) != NULL) { TAILQ_REMOVE(&entry->cve_versions, ventry, cvv_link); ct_version_tree_free_version(ventry); } /* Clean up children */ while ((child = RB_ROOT(&entry->cve_children)) != NULL) { RB_REMOVE(ct_vertree_entries, &entry->cve_children, child); ct_version_tree_free_entry(child); e_free(&child); } }
/* Release every file still pending on the extract list. */
void
ct_extract_pending_cleanup(struct ct_pending_files *head)
{
	struct ct_pending_file	*pf;

	while ((pf = RB_ROOT(head)) != NULL)
		ct_extract_free_entry(head, pf);
}
/* Empty a notify queue, freeing every queued item. */
void clear_notify_queue(struct notify_queue *queue)
{
	struct notify_item *entry;

	while ((entry = RB_ROOT(queue)) != NULL) {
		RB_REMOVE(notify_queue, queue, entry);
		free(entry);
	}
}
/* Dispose of an entire tupid tree, releasing each node. */
void free_tupid_tree(struct tupid_entries *root)
{
	struct tupid_tree *node;

	while ((node = RB_ROOT(root)) != NULL) {
		tupid_tree_rm(root, node);
		free(node);
	}
}
/* Tear down a match tree, freeing each node and its string. */
void
ct_rb_unwind(struct ct_match_tree *head)
{
	struct ct_match_node	*node;

	while ((node = RB_ROOT(head)) != NULL) {
		RB_REMOVE(ct_match_tree, head, node);
		e_free(&node->cmn_string);
		e_free(&node);
	}
}
/* Destroy a job tree. */ void job_tree_free(struct jobs *jobs) { struct job *job; while (!RB_EMPTY(jobs)) { job = RB_ROOT(jobs); RB_REMOVE(jobs, jobs, job); job_free(job); } }
/*
 * Debugging helper: locate *vm_entry's store node in the map's
 * red-black tree and panic with diagnostic pointers either way --
 * this function never returns normally.
 */
void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	/* NOTE(review): header copied by value, as elsewhere in this
	 * file -- the RB_FIND below runs on the copy's head. */
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store));
	if(rb_entry == NULL)
		/* entry is not in the tree at all */
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	else
		/* entry found: dump it with its left/right neighbors */
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry,entry)), VME_FOR_STORE(RB_RIGHT(rb_entry,entry)));
}
/* Release every variable entry held in the database. Returns 0. */
int vardb_close(struct vardb *v)
{
	struct string_tree *node;

	while ((node = RB_ROOT(&v->root)) != NULL) {
		/* The string_tree node is embedded inside the var_entry,
		 * so recover the owner before unlinking. */
		struct var_entry *entry = container_of(node, struct var_entry, var);

		string_tree_rm(&v->root, node);
		free(node->s);
		free(entry->value);
		free(entry);
	}
	return 0;
}
/* Free an environment. */
void
environ_free(struct environ *env)
{
	struct environ_entry	*entry;

	/* RB_ROOT is NULL exactly when the tree is empty. */
	while ((entry = RB_ROOT(env)) != NULL) {
		RB_REMOVE(environ, env, entry);
		free(entry->name);
		free(entry->value);
		free(entry);
	}
}
/* Release an expand structure's queue links and tree contents. */
void
expand_free(struct expand *expand)
{
	struct expandnode	*node;

	if (expand->queue) {
		/* Queue entries are only unlinked here, not freed
		 * (NOTE: presumably the same nodes live in the tree
		 * and are freed below -- confirm against callers). */
		while ((node = TAILQ_FIRST(expand->queue)) != NULL)
			TAILQ_REMOVE(expand->queue, node, tq_entry);
	}

	while ((node = RB_ROOT(&expand->tree)) != NULL) {
		RB_REMOVE(expandtree, &expand->tree, node);
		free(node);
	}
}
/* Release an options tree and everything it owns. */
void
options_free(struct options *oo)
{
	struct options_entry	*entry;

	while ((entry = RB_ROOT(&oo->tree)) != NULL) {
		RB_REMOVE(options_tree, &oo->tree, entry);
		free(entry->name);
		/* only string options own a separate str buffer */
		if (entry->type == OPTIONS_STRING)
			free(entry->str);
		free(entry);
	}
}
int main(int argc, char **argv) { struct node *tmp, *ins; int i, max, min; RB_INIT(&root); for (i = 0; i < ITER; i++) { tmp = malloc(sizeof(struct node)); if (tmp == NULL) err(1, "malloc"); do { tmp->key = arc4random_uniform(MAX-MIN); tmp->key += MIN; } while (RB_FIND(tree, &root, tmp) != NULL); if (i == 0) max = min = tmp->key; else { if (tmp->key > max) max = tmp->key; if (tmp->key < min) min = tmp->key; } if (RB_INSERT(tree, &root, tmp) != NULL) errx(1, "RB_INSERT failed"); } ins = RB_MIN(tree, &root); if (ins->key != min) errx(1, "min does not match"); tmp = ins; ins = RB_MAX(tree, &root); if (ins->key != max) errx(1, "max does not match"); if (RB_REMOVE(tree, &root, tmp) != tmp) errx(1, "RB_REMOVE failed"); for (i = 0; i < ITER - 1; i++) { tmp = RB_ROOT(&root); if (tmp == NULL) errx(1, "RB_ROOT error"); if (RB_REMOVE(tree, &root, tmp) != tmp) errx(1, "RB_REMOVE error"); free(tmp); } exit(0); }
/* Invoke the callback for every slot whose queue is non-empty. */
void exec_pending_notifications(void)
{
	int i;

	if (!notify_pending)
		return ;

	ENTER();
	for (i = 0; i < 16; i++) {
		/* a non-NULL root means the queue has pending items */
		if (slots[i].cb && RB_ROOT(&slots[i].queue))
			slots[i].cb(slots[i].data, &slots[i].queue);
	}
	notify_pending = 0;
	EXIT();
}
/* Flush the login cache, freeing every entry, and reset its size. */
void
ct_cleanup_login_cache(void)
{
	struct ct_login_cache	*entry;

	while ((entry = RB_ROOT(&ct_login_cache)) != NULL) {
		RB_REMOVE(ct_login_cache_tree, &ct_login_cache, entry);
		/* may cache negative entries, uid not found, avoid NULL free */
		if (entry->lc_name != NULL) {
			e_free(&entry->lc_name);
		}
		e_free(&entry);
	}
	ct_login_cache_size = 0;
}
/**
 * Delete a node from one order of a red-black tree
 *
 * This function has three parameters: a tree, the node to delete, and
 * the order from which to delete it. _rb_delete() is an
 * implementation of the routine RB-DELETE on p. 273 of Cormen et
 * al. (p. 324 in the paperback version of the 2009 edition).
 */
HIDDEN void
_rb_delete(struct bu_rb_tree *tree, struct bu_rb_node *node, int order)
{
    struct bu_rb_node *y;		/* The node to splice out */
    struct bu_rb_node *parent;
    struct bu_rb_node *only_child;

    BU_CKMAG(tree, BU_RB_TREE_MAGIC, "red-black tree");
    BU_CKMAG(node, BU_RB_NODE_MAGIC, "red-black node");
    RB_CKORDER(tree, order);

    if (UNLIKELY(tree->rbt_debug & BU_RB_DEBUG_DELETE))
	bu_log("_rb_delete(%p, %p, %d): data=%p\n", (void*)tree, (void*)node, order, RB_DATA(node, order));

    /* Splice node itself if it has at most one real child; otherwise
     * splice its successor, which has at most one child by RB order. */
    if ((RB_LEFT_CHILD(node, order) == RB_NULL(tree))
	|| (RB_RIGHT_CHILD(node, order) == RB_NULL(tree)))
	y = node;
    else
	y = rb_neighbor(node, order, SENSE_MAX);

    /* y has at most one non-nil child; find it. */
    if (RB_LEFT_CHILD(y, order) == RB_NULL(tree))
	only_child = RB_RIGHT_CHILD(y, order);
    else
	only_child = RB_LEFT_CHILD(y, order);

    /* Link only_child into y's place under y's parent. */
    parent = RB_PARENT(only_child, order) = RB_PARENT(y, order);
    if (parent == RB_NULL(tree))
	RB_ROOT(tree, order) = only_child;
    else if (y == RB_LEFT_CHILD(parent, order))
	RB_LEFT_CHILD(parent, order) = only_child;
    else
	RB_RIGHT_CHILD(parent, order) = only_child;

    /*
     * Splice y out if it's not node
     * (move y's package into node rather than copying node's data)
     */
    if (y != node) {
	(node->rbn_package)[order] = (y->rbn_package)[order];
	((node->rbn_package)[order]->rbp_node)[order] = node;
    }

    /* Removing a black node may violate RB properties; repair. */
    if (RB_GET_COLOR(y, order) == RB_BLK)
	_rb_fixup(tree, only_child, order);

    /* Free the spliced node once no order references it. */
    if (--(y->rbn_pkg_refs) == 0)
	rb_free_node(y);
}
void image_loader_cleanup() { for(;;) { struct image_cache *entry = RB_ROOT(&image_cache); if(!entry) break; SDL_FreeSurface(entry->surface); free(entry->alias); entry = image_cache_tree_RB_REMOVE(&image_cache, entry); free(entry); } }
/* Shut down a flow tracker: recycle all flows, then free the pool. */
void
peak_track_exit(struct peak_tracks *self)
{
	struct peak_track *entry;

	if (self == NULL)
		return;

	/* Return every tracked flow to the preallocation pool. */
	while ((entry = RB_ROOT(&self->flows)) != NULL) {
		RB_REMOVE(peak_track_tree, &self->flows, entry);
		prealloc_put(&self->mem, entry);
	}

	prealloc_exit(&self->mem);
	free(self);
}
/*
 * Search one order of a red-black tree for the given data.  Returns
 * the stored data pointer on success, NULL when no match is found.
 */
void *
bu_rb_search (struct bu_rb_tree *tree, int order, void *data)
{
    struct bu_rb_node *found;
    int (*cmp)(const void *, const void *);

    BU_CKMAG(tree, BU_RB_TREE_MAGIC, "red-black tree");
    RB_CKORDER(tree, order);

    cmp = RB_COMPARE_FUNC(tree, order);
    found = _rb_search(RB_ROOT(tree, order), order, cmp, data);

    return (found == RB_NULL(tree)) ? NULL : RB_DATA(found, order);
}
/*
 * Traverse one order of a red-black tree, applying visit to either
 * the nodes or the data in pre-, in-, or post-order.  Illegal
 * what_to_visit / trav_type values abort via bu_bomb().
 */
void
rb_walk(struct bu_rb_tree *tree, int order, void (*visit)(void), int what_to_visit, int trav_type)
{
    /* Dispatch table indexed by [what_to_visit][trav_type]:
     * row 0 visits nodes, row 1 visits data. */
    static void (*walk[][3])(void) = {
	{ BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(prewalknodes),
	  BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(inwalknodes),
	  BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(postwalknodes) },
	{ BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(prewalkdata),
	  BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(inwalkdata),
	  BU_RB_WALK_FUNC_CAST_AS_FUNC_ARG(postwalkdata) }
    };

    BU_CKMAG(tree, BU_RB_TREE_MAGIC, "red-black tree");
    RB_CKORDER(tree, order);

    switch (trav_type) {
	case BU_RB_WALK_PREORDER:
	case BU_RB_WALK_INORDER:
	case BU_RB_WALK_POSTORDER:
	    switch (what_to_visit) {
		case WALK_NODES:
		case WALK_DATA:
		    {
			/* cast the table entry back to a callable
			 * walker and start at the order's root */
			BU_RB_WALK_FUNC_FUNC_DECL(_walk_func);
			_walk_func = BU_RB_WALK_FUNC_CAST_AS_FUNC_FUNC(walk[what_to_visit][trav_type]);
			_walk_func(RB_ROOT(tree, order), order, visit, 0);
		    }
		    break;
		default:
		    bu_log("ERROR: rb_walk(): Illegal visitation object: %d\n", what_to_visit);
		    bu_bomb("");
	    }
	    break;
	default:
	    bu_log("ERROR: rb_walk(): Illegal traversal type: %d\n", trav_type);
	    bu_bomb("");
    }
}
/*
 * Look up a node by MAC address in the interface's node tree.
 * Returns NULL when the address is not present.
 */
struct ieee80211_node *
ieee80211_find_node(struct ieee80211com *ic, const u_int8_t *macaddr)
{
	/* similar to RB_FIND except we compare keys, not nodes */
	struct ieee80211_node *ni = RB_ROOT(&ic->ic_tree);

	while (ni != NULL) {
		int diff = memcmp(macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);

		if (diff == 0)
			break;
		ni = (diff < 0) ? RB_LEFT(ni, ni_node) : RB_RIGHT(ni, ni_node);
	}
	return ni;
}
enum cmd_retval cmd_unbind_key_exec(struct cmd *self, struct cmd_q *cmdq) { struct args *args = self->args; struct key_binding *bd; int key; if (!args_has(args, 'a')) { if (args->argc != 1) { cmdq_error(cmdq, "missing key"); return (CMD_RETURN_ERROR); } key = key_string_lookup_string(args->argv[0]); if (key == KEYC_NONE) { cmdq_error(cmdq, "unknown key: %s", args->argv[0]); return (CMD_RETURN_ERROR); } } else { if (args->argc != 0) { cmdq_error(cmdq, "key given with -a"); return (CMD_RETURN_ERROR); } key = KEYC_NONE; } if (args_has(args, 't')) return (cmd_unbind_key_table(self, cmdq, key)); if (key == KEYC_NONE) { while (!RB_EMPTY(&key_bindings)) { bd = RB_ROOT(&key_bindings); key_bindings_remove(bd->key); } return (CMD_RETURN_NORMAL); } if (!args_has(args, 'n')) key |= KEYC_PREFIX; key_bindings_remove(key); return (CMD_RETURN_NORMAL); }