/** Add a child to a parent CONF_ITEM
 *
 * Lazily creates the parent's two identifier lookup trees on first use,
 * then inserts the child (and any items already chained after it) into
 * both trees and onto the parent's ordered child list.
 *
 * @param[in] parent	to add child to.  May be NULL (no-op).
 * @param[in] child	to add.  May be NULL (no-op).
 */
void _cf_item_add(CONF_ITEM *parent, CONF_ITEM *child)
{
	fr_cursor_t	to_merge;
	CONF_ITEM	*ci;

	rad_assert(parent != child);

	if (!parent || !child) return;

	/*
	 *	New child, add child trees.
	 *	ident1/ident2 are created on demand so empty sections
	 *	never pay for the trees.
	 */
	if (!parent->ident1) parent->ident1 = rbtree_create(parent, _cf_ident1_cmp, NULL, RBTREE_FLAG_NONE);
	if (!parent->ident2) parent->ident2 = rbtree_create(parent, _cf_ident2_cmp, NULL, RBTREE_FLAG_NONE);

	/* Cursor over the child's list; may yield more than one item. */
	fr_cursor_init(&to_merge, &child);

	for (ci = fr_cursor_head(&to_merge); ci; ci = fr_cursor_next(&to_merge)) {
		rbtree_insert(parent->ident1, ci);
		rbtree_insert(parent->ident2, ci);		/* NULL ident2 is still a value */
		fr_cursor_append(&parent->cursor, ci);		/* Append to the list of children */
	}
}
/*
 * Initialise the two per-object statistics trees and print the legend
 * that explains the columns of the per-object report lines emitted later.
 * NOTE(review): the format strings below are runtime output and are kept
 * byte-for-byte; the meaning of each bracketed field is described inline
 * in the legend itself.
 */
void top_obj_init() {
	/* Global lookup trees (primary and secondary index over objects). */
	r = rbtree_create();
	r2 = rbtree_create();

	/* Column legend, one header per report section. */
	printf("#'<object uid>-<object allocator function>' - <#access-to-object> [<#distant-accesses> <%% local>] [<%%-of-total-access-that-are-on-the-obj>] [<#access-avoidable-by-pinning-threads> <%%-of access to the obj>] [<#access-avoidable-by-allocating on different node> <%%>]\n");
	printf("#\tNODES [<number of access performed FROM the node (one fig per node)>+]");
	printf("]\tHOSTED ON [<number of accesses performed TO the node (one fig per node)>+]");
	printf("] ALLOCATED BY SELF [<%% of accesses done by the thread that allocated the object> (#access/#total access to the obj) (<# access done by the allocating thread before any other thread accessed the object)]\n\n");
}
/*
 *	Reset the cached entries.
 *
 *	Frees both lookup trees and the free-offset list under the cache
 *	mutex, then re-creates them empty.  Returns 1 on success (including
 *	the already-empty fast path), 0 on allocation failure.
 *
 *	NOTE(review): the emptiness pre-check runs before the mutex is
 *	taken — presumably callers serialise resets externally; confirm.
 */
static int cache_reset(rlm_radutmp_t *inst, radutmp_cache_t *cache)
{
	NAS_PORT *this, *next;

	/*
	 *	Cache is already reset, do nothing.
	 */
	if ((rbtree_num_elements(cache->nas_ports) == 0) &&
	    (cache->free_offsets == NULL)) {
		DEBUG2(" rlm_radutmp: Not resetting the cache");
		return 1;
	}
	DEBUG2(" rlm_radutmp: Resetting the cache");

	pthread_mutex_lock(&cache->mutex);

	/* Drop both trees; entries are owned by the trees (free callbacks). */
	rbtree_free(inst->user_tree);
	rbtree_free(cache->nas_ports);

	/* Walk-and-free the singly linked free-offset list. */
	for (this = cache->free_offsets; this != NULL; this = next) {
		next = this->next;
		free(this);
	}
	cache->free_offsets = NULL;

	/*
	 *	Re-create the caches.
	 */
	cache->nas_ports = rbtree_create(nas_port_cmp, free, 0);
	if (!cache->nas_ports) {
		pthread_mutex_unlock(&cache->mutex);
		radlog(L_ERR, "rlm_radutmp: No memory");
		return 0;
	}

	cache->max_offset = 0;
	cache->cached_file = 1;

	/* Pick the comparator matching the configured case sensitivity. */
	if (inst->case_sensitive) {
		inst->user_tree = rbtree_create(user_cmp, free, 0);
	} else {
		inst->user_tree = rbtree_create(user_case_cmp, free, 0);
	}
	if (!inst->user_tree) {
		pthread_mutex_unlock(&cache->mutex);
		radlog(L_ERR, "rlm_radutmp: No memory");
		return 0;
	}

	pthread_mutex_unlock(&cache->mutex);
	return 1;
}
int main() { /* * default flags */ rbtree_t rbt; rbtree_create(&rbt, sizeof(int)); int n = 1; rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); print_rbt(rbt.root, 0); rbtree_destroy( &rbt ); /* * right leaning flag */ rbtree_create(&rbt, sizeof(int)); rbtree_set_flags(&rbt, rbt.flags & ~G_RB_LEFT_LEANING); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); print_rbt(rbt.root, 0); rbtree_destroy( &rbt ); /* * override flag */ rbtree_create(&rbt, sizeof(int)); rbtree_set_flags(&rbt, G_RB_EQUAL_OVERRIDE); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); rbtree_add(&rbt, &n); print_rbt(rbt.root, 0); rbtree_destroy( &rbt ); return 0; }
/** read creation entry
 *
 * Reads one lock-creation record (thread id, instance, file, line) from
 * the trace file and inserts it into the `all` tree.  If the lock id is
 * already present, the existing entry's creation site is updated and the
 * freshly allocated duplicate is released.
 */
static void read_create(rbtree_t* all, FILE* in)
{
	struct order_lock* o = calloc(1, sizeof(struct order_lock));
	if(!o) fatal_exit("malloc failure");
	/* Fixed record layout: thr, instance, file string, line. */
	if(fread(&o->id.thr, sizeof(int), 1, in) != 1 ||
	   fread(&o->id.instance, sizeof(int), 1, in) != 1 ||
	   !readup_str(&o->create_file, in) ||
	   fread(&o->create_line, sizeof(int), 1, in) != 1)
		fatal_exit("fread failed");
	o->smaller = rbtree_create(order_lock_cmp);
	o->node.key = &o->id;
	if(!rbtree_insert(all, &o->node)) {
		/* already inserted: update the existing node's creation
		 * site and free the duplicate (but keep create_file,
		 * whose ownership moves to the existing entry). */
		struct order_lock* a = (struct order_lock*)rbtree_search(all, &o->id);
		log_assert(a);
		a->create_file = o->create_file;
		a->create_line = o->create_line;
		free(o->smaller);
		free(o);
		o = a;
	}
	if(verb) printf("read create %u %u %s %d\n",
		(unsigned)o->id.thr, (unsigned)o->id.instance,
		o->create_file, o->create_line);
}
/* * Caller is responsible for managing the packet entries. */ fr_packet_list_t *fr_packet_list_create(int alloc_id) { int i; fr_packet_list_t *pl; pl = malloc(sizeof(*pl)); if (!pl) return NULL; memset(pl, 0, sizeof(*pl)); pl->tree = rbtree_create(packet_entry_cmp, NULL, 0); if (!pl->tree) { fr_packet_list_free(pl); return NULL; } for (i = 0; i < MAX_SOCKETS; i++) { pl->sockets[i].sockfd = -1; } if (alloc_id) { pl->alloc_id = 1; pl->dst2id_ht = fr_hash_table_create(packet_dst2id_hash, packet_dst2id_cmp, packet_dst2id_free); if (!pl->dst2id_ht) { fr_packet_list_free(pl); return NULL; } } return pl; }
/*
 *	Instantiate the rlm_securid module: allocate instance data, parse
 *	the config section, create the session tree and its mutex.
 *	Returns 0 on success, -1 on failure (instance freed via detach).
 */
static int securid_instantiate(CONF_SECTION *conf, void **instance)
{
	rlm_securid_t *inst;

	/* Set up a storage area for instance data */
	inst = rad_malloc(sizeof(*inst));
	if (!inst) return -1;
	memset(inst, 0, sizeof(*inst));

	/* If the configuration parameters can't be parsed, then fail. */
	if (cf_section_parse(conf, inst, module_config) < 0) {
		radlog(L_ERR|L_CONS, "rlm_securid: Unable to parse configuration section.");
		securid_detach(inst);
		return -1;
	}

	/*
	 *	Lookup sessions in the tree.  We don't free them in
	 *	the tree, as that's taken care of elsewhere...
	 *	(no free callback passed to rbtree_create)
	 */
	inst->session_tree = rbtree_create(securid_session_cmp, NULL, 0);
	if (!inst->session_tree) {
		radlog(L_ERR|L_CONS, "rlm_securid: Cannot initialize session tree.");
		securid_detach(inst);
		return -1;
	}

	/* Serialises access to the session tree. */
	pthread_mutex_init(&(inst->session_mutex), NULL);

	*instance = inst;
	return 0;
}
/*
 *	Create the trust-anchor store: a region allocator, the anchor
 *	tree, the autotrust global state, and a lock protecting it all.
 *	Returns NULL on allocation failure.  Note the cleanup escalates:
 *	a bare free() before the tree exists, anchors_delete() after.
 */
struct val_anchors* anchors_create(void)
{
	struct val_anchors* a = (struct val_anchors*)calloc(1, sizeof(*a));
	if(!a)
		return NULL;
	a->region = regional_create();
	if(!a->region) {
		free(a);	/* nothing else allocated yet */
		return NULL;
	}
	a->tree = rbtree_create(anchor_cmp);
	if(!a->tree) {
		anchors_delete(a);	/* full teardown from here on */
		return NULL;
	}
	a->autr = autr_global_create();
	if(!a->autr) {
		anchors_delete(a);
		return NULL;
	}
	lock_basic_init(&a->lock);
	/* Register the protected memory ranges for lock checking builds. */
	lock_protect(&a->lock, a, sizeof(*a));
	lock_protect(&a->lock, a->autr, sizeof(*a->autr));
	return a;
}
/*
 * Interactive demo: read integers from stdin, one per line, inserting
 * each into an rbtree keyed and valued by the integer itself, redrawing
 * the tree after every insert.
 * NOTE(review): this snippet appears TRUNCATED — the trailing `else`
 * branch (character accumulation into buf) and the closing braces are
 * missing from the visible source; code below is kept byte-identical.
 */
int main(int argc, char **argv)
{
	int *v;
	int i;
	char buf[256];
	int c;
	int ofx = 0;

	rbtree_t *rbt = rbtree_create(rbtree_cmp_keys_int32, free);

	/* ANSI escape: clear screen and home the cursor. */
	printf("\e[1;1H\e[2J");
	printf("Enter an integer number: ");
	while((c = getchar())) {
		if (c == EOF) break;
		if (c == '\n') {
			buf[ofx] = 0;
			/* Parse the accumulated line; tree owns *num (free callback). */
			int *num = malloc(sizeof(int));
			*num = strtol(buf, NULL, 10);
			printf("Added node: %d\n\n", *num);
			rbtree_add(rbt, num, sizeof(int), num, sizeof(int));
			printf("\e[1;1H\e[2J");
			rbtree_print(rbt);
			ofx = 0;
			printf("Enter an integer number: ");
		} else {
/*****************************************************************************
 * Creates the data to be passed to the SAX parser.
 * Allocates and zeroes the parser state, wires in the caller's callbacks
 * and user data, creates the alphabet-id lookup tree, and prepares the
 * character and attribute buffers plus the expected-element stack.
 ****************************************************************************/
void* create_dreme_io_xml_sax_context(void *user_data, DREME_IO_XML_CALLBACKS_T *callbacks) {
  PS_T *parser;
  CHARBUF_T *chars;

  parser = (PS_T*)mm_malloc(sizeof(PS_T));
  memset(parser, 0, sizeof(PS_T));

  /* initial parse state and caller hooks */
  parser->state = PS_START;
  parser->udepth = 0;
  parser->callbacks = callbacks;
  parser->user_data = user_data;

  /* alphabet tracking */
  parser->seen_alphabet = false;
  parser->seen_ambig = false;
  parser->alph_ids = rbtree_create(rbtree_strcmp, rbtree_strcpy, free, rbtree_intcpy, free);

  parser->freqs = NULL;
  parser->motif_id = NULL;
  parser->last_pos = 0;

  /* set up character buffer (starts as a 10-byte empty string) */
  chars = &(parser->characters);
  chars->buffer = mm_malloc(sizeof(char)*10);
  chars->buffer[0] = '\0';
  chars->size = 10;
  chars->pos = 0;

  attrbuf_init(&(parser->attrbuf));
  /* set up expected queue */
  parser->expected_stack = linklst_create();
  return parser;
}
/* Initialise a kthread run-queue: prepare its spinlock and give it an
 * empty CFS red-black tree. */
extern void kthread_init_runqueue(kthread_runqueue_t *kthread_runq)
{
	gt_spinlock_init(&kthread_runq->kthread_runqlock);
	kthread_runq->cfs_rq = rbtree_create();
}
/* Construct a MemoryCache bound to a process handle.
 * The page cache tree is keyed by address (compare_size_t). */
MemoryCache* MC_MemoryCache_ctor(HANDLE PHDL, bool dont_read_from_quicksilver_places)
{
	MemoryCache* cache = DCALLOC(MemoryCache, 1, "MemoryCache");

	cache->PHDL = PHDL;
	cache->dont_read_from_quicksilver_places = dont_read_from_quicksilver_places;
	cache->_cache = rbtree_create(true, "MemoryCache._cache", compare_size_t);
	return cache;
}
/* init rbtree tests: set up the shared region and tree used by the suite.
 * Returns 0 on success, 1 on allocation failure.
 * The region is intentionally leaked for the lifetime of the test run. */
static int init_rbtree(void)
{
	reg = region_create(malloc, free);
	if(!reg)
		return 1;
	tree = rbtree_create(reg, testcompare);
	if(!tree)
		return 1;
	return 0;
}
/* Construct a MemoryCache over a caller-supplied memory block, for tests.
 * The block size must be a whole number of pages. */
MemoryCache* MC_MemoryCache_ctor_testing(BYTE *testing_memory, SIZE_T testing_memory_size)
{
	MemoryCache* cache;

	oassert((testing_memory_size & (PAGE_SIZE-1)) == 0);

	cache = DCALLOC(MemoryCache, 1, "MemoryCache");
	cache->_cache = rbtree_create(true, "MemoryCache._cache", compare_size_t);
	cache->testing = true;
	cache->testing_memory = testing_memory;
	cache->testing_memory_size = testing_memory_size;
	return cache;
}
/***************************************************************************** * Create the datastructure for storing motifs while their content is * still being parsed. ****************************************************************************/ static CTX_T* create_parser_data(int options, const char *optional_file_name) { CTX_T *data; data = (CTX_T*)mm_malloc(sizeof(CTX_T)); memset(data, 0, sizeof(CTX_T)); data->format_match = file_name_match("meme", "xml", optional_file_name); data->warnings = linklst_create(); data->errors = linklst_create(); data->motif_queue = linklst_create(); data->options = options; data->letter_lookup = rbtree_create(rbtree_strcmp, rbtree_strcpy, free, rbtree_strcpy, free); data->alph = NULL; data->alph_rdr = NULL; data->nums = NULL; if (options & SCANNED_SITES) { data->sequence_lookup = rbtree_create(rbtree_strcmp, rbtree_strcpy, free, NULL, destroy_seqinfo); data->motif_lookup = rbtree_create(rbtree_strcmp, rbtree_strcpy, free, rbtree_intcpy, free); } return data; }
/* Smoke-test the tree: insert ten random keys in [0,100), using the loop
 * index as the stored value, then display the final tree. */
int try_rbtree()
{
	RBTree *t = rbtree_create();
	long int i;

	for (i = 0; i < 10; i++)
		rbtree_put(t, random() % 100, (void *)i);
	rbtree_show(t);
	return 0;
}
/** insert lock entry (empty) into list
 *
 * Allocates a fresh order_lock for the given id, with an empty
 * smaller-than tree, and inserts it into `all`.  The id must not
 * already be present. */
static struct order_lock* insert_lock(rbtree_t* all, struct order_id* id)
{
	struct order_lock* lock = calloc(1, sizeof(struct order_lock));

	if(!lock)
		fatal_exit("malloc failure");
	lock->id = *id;
	lock->node.key = &lock->id;	/* key points into the entry itself */
	lock->smaller = rbtree_create(order_lock_cmp);
	if(!rbtree_insert(all, &lock->node))
		fatal_exit("insert fail should not happen");
	return lock;
}
/*
 *	Instantiate rlm_radutmp: allocate instance data, parse the config
 *	section, create the NAS-port and user lookup trees and the cache
 *	mutex.  Returns 0 on success, -1 on failure (cleaned up via detach).
 */
static int radutmp_instantiate(CONF_SECTION *conf, void **instance)
{
	rlm_radutmp_t *inst;

	inst = rad_malloc(sizeof(*inst));
	if (!inst) {
		return -1;
	}
	memset(inst, 0, sizeof(*inst));

	/*
	 *	BUG FIX: cf_section_parse() returns < 0 on error; testing
	 *	for any non-zero value treated legitimate positive returns
	 *	as failure (other instantiate functions in this codebase
	 *	already check "< 0").
	 */
	if (cf_section_parse(conf, inst, module_config) < 0) {
		radutmp_detach(inst);
		return -1;
	}

	inst->cache.nas_ports = rbtree_create(nas_port_cmp, free, 0);
	if (!inst->cache.nas_ports) {
		radlog(L_ERR, "rlm_radutmp: Failed to create nas tree");
		radutmp_detach(inst);
		return -1;
	}
	pthread_mutex_init(&(inst->cache.mutex), NULL);
	inst->cache.permission = inst->permission;

	/* Comparator follows the configured case sensitivity. */
	if (inst->case_sensitive) {
		inst->user_tree = rbtree_create(user_cmp, free, 0);
	} else {
		inst->user_tree = rbtree_create(user_case_cmp, free, 0);
	}
	if (!inst->user_tree) {
		radlog(L_ERR, "rlm_radutmp: Failed to create user tree");
		radutmp_detach(inst);
		return -1;
	}

	*instance = inst;
	return 0;
}
/* One-time initialisation of the event groups: give each group a mutex
 * and an empty event tree.  Idempotent — a second call is a no-op once
 * the init bit is set in wres_event_stat. */
void wres_event_init()
{
	int grp;

	if (wres_event_stat & WRES_STAT_INIT)
		return;

	for (grp = 0; grp < WRES_EVENT_GROUP_SIZE; grp++) {
		pthread_mutex_init(&wres_event_group[grp].mutex, NULL);
		wres_event_group[grp].head = rbtree_create();
	}
	wres_event_stat |= WRES_STAT_INIT;
}
/*
 * Create a map backed by an rbtree of the given type.
 *
 * @param type  rbtree_type describing key comparison etc. (opaque here).
 * @return the new map, or NULL on allocation failure.
 */
map *map_create(void *type)
{
	rbtree_type *rt = (rbtree_type *)type;

	/* calloc's convention is (count, size); sizeof *m stays correct
	 * even if the struct's type name changes. */
	map *m = calloc(1, sizeof *m);
	if (m == NULL) {
		return NULL;
	}
	m->_t = rbtree_create(rt);
	if (m->_t == NULL) {
		free(m);	/* don't leak the shell on tree failure */
		return NULL;
	}
	return m;
}
/*
 *	Create the transient state used while reading a diff file: a region
 *	allocator holding everything, and a zone tree keyed by dname.
 *	NOTE(review): region_create's result is not checked before
 *	region_alloc — presumably xalloc aborts on OOM so it cannot return
 *	NULL; confirm against the allocator's contract.
 */
static struct diff_read_data* diff_read_data_create()
{
	region_type* region = region_create(xalloc, free);
	struct diff_read_data* data = (struct diff_read_data*)
		region_alloc(region, sizeof(struct diff_read_data));
	if(!data) {
		log_msg(LOG_ERR, "out of memory, %s:%d", __FILE__, __LINE__);
		exit(1);
	}
	data->region = region;
	/* zones compared by canonical dname order */
	data->zones = rbtree_create(region,
		(int (*)(const void *, const void *)) dname_compare);
	return data;
}
/* Rebuild the forward-zone tree from configuration.
 * Returns 1 on success, 0 on failure (allocation or config read error). */
int forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
{
	/* discard any previous tree before rebuilding */
	fwd_del_tree(fwd);
	fwd->tree = rbtree_create(fwd_cmp);
	if(!fwd->tree)
		return 0;

	/* read forward zones, then punch holes for stub zones */
	if(!read_forwards(fwd, cfg) || !make_stub_holes(fwd, cfg))
		return 0;

	fwd_init_parents(fwd);
	return 1;
}
/*
 *	Allocate a diff_zone for `name` inside the data region, key it by
 *	the parsed dname, give it an empty parts tree, and insert it into
 *	the zones tree.  Exits on allocation failure.
 *	NOTE(review): dname_parse's result is not checked — a malformed
 *	zone name would store a NULL key; confirm callers validate names.
 */
static struct diff_zone* diff_read_insert_zone(struct diff_read_data* data,
	const char* name)
{
	const dname_type* dname = dname_parse(data->region, name);
	struct diff_zone* zp = region_alloc(data->region,
		sizeof(struct diff_zone));
	if(!zp) {
		log_msg(LOG_ERR, "out of memory, %s:%d", __FILE__, __LINE__);
		exit(1);
	}
	/* start from the sentinel node, then set the key */
	zp->node = *RBTREE_NULL;
	zp->node.key = dname;
	/* parts ordered by integer sequence number */
	zp->parts = rbtree_create(data->region, intcompf);
	rbtree_insert(data->zones, (rbnode_t*)zp);
	return zp;
}
int chunk_manager_init (struct chunk_manager *cm, int fd, int mode){ LOG(INFO, "chunk_manager_init called\n"); assert(cm); cm -> cur_chunk_size = DEFAULT_CHUNK_SIZE; cm -> fd = fd; int i; for (i = 0; i < POOL_SIZE; i++){ chunk_init_unused(cm -> chunk_pool + i); } cm -> cur_chunk_index = 0; cm -> rbtree = rbtree_create(chunk_cmp, chunk_free_node, 1); if (!cm -> rbtree) return 1; return 0; }
/** Register a map processor
 *
 * This should be called by every module that provides a map processing function.
 *
 * The global map_proc_root tree is created lazily on first registration
 * (RBTREE_FLAG_REPLACE lets re-registration replace entries).  If a
 * processor with this name already exists, its callbacks are updated in
 * place; otherwise a new entry is allocated (parented to mod_inst) and
 * inserted with a destructor that unregisters it on free.
 *
 * @param[in] mod_inst of module registering the map_proc.
 * @param[in] name of map processor. If processor already exists, it is replaced.
 * @param[in] evaluate Module's map processor function.
 * @param[in] escape function to sanitize any sub expansions in the map source query.
 * @param[in] instantiate function (optional).
 * @param[in] inst_size of talloc chunk to allocate for instance data (optional).
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
int map_proc_register(void *mod_inst, char const *name,
		      map_proc_func_t evaluate,
		      xlat_escape_t escape,
		      map_proc_instantiate_t instantiate, size_t inst_size)
{
	map_proc_t *proc;

	rad_assert(name && name[0]);

	/* Lazily create the global registry tree. */
	if (!map_proc_root) {
		map_proc_root = rbtree_create(NULL, map_proc_cmp, NULL, RBTREE_FLAG_REPLACE);
		if (!map_proc_root) {
			DEBUG("map_proc: Failed to create tree");
			return -1;
		}
	}

	/*
	 *	If it already exists, replace it.
	 */
	proc = map_proc_find(name);
	if (!proc) {
		rbnode_t *node;

		proc = talloc_zero(mod_inst, map_proc_t);
		strlcpy(proc->name, name, sizeof(proc->name));
		proc->length = strlen(proc->name);

		node = rbtree_insert_node(map_proc_root, proc);
		if (!node) {
			talloc_free(proc);
			return -1;
		}

		/* Freeing the proc removes it from the registry. */
		talloc_set_destructor(proc, _map_proc_unregister);
	}

	DEBUG3("map_proc_register: %s", proc->name);

	/* Update callbacks whether the entry is new or pre-existing. */
	proc->mod_inst = mod_inst;
	proc->evaluate = evaluate;
	proc->escape = escape;
	proc->instantiate = instantiate;
	proc->inst_size = inst_size;

	return 0;
}
/* Instantiate rlm_securid: create the session tree and its guarding
 * mutex.  Returns 0 on success, -1 if the tree cannot be created. */
static int mod_instantiate(UNUSED CONF_SECTION *conf, void *instance)
{
	rlm_securid_t *inst = instance;

	/*
	 *	Sessions live in an rbtree with no free callback; the
	 *	entries are freed elsewhere, not by the tree.
	 */
	inst->session_tree = rbtree_create(NULL, securid_session_cmp, NULL, 0);
	if (!inst->session_tree) {
		ERROR("rlm_securid: Cannot initialize session tree");
		return -1;
	}

	pthread_mutex_init(&inst->session_mutex, NULL);
	return 0;
}
/* Deep-copy constructor: duplicate a MemoryCache, including a full copy
 * of its page-cache tree (keys and values copied via the copier hooks). */
MemoryCache* MC_MemoryCache_copy_ctor(MemoryCache *mc)
{
	MemoryCache* copy = DCALLOC(MemoryCache, 1, "MemoryCache");

	copy->PHDL = mc->PHDL;
	copy->dont_read_from_quicksilver_places = mc->dont_read_from_quicksilver_places;

	copy->_cache = rbtree_create(true, "MemoryCache._cache", compare_size_t);
	rbtree_copy(mc->_cache, copy->_cache, key_copier, value_copier);
#ifdef BOLT_DEBUG
	/* debug builds also mirror the testing-memory configuration */
	copy->testing = mc->testing;
	copy->testing_memory = mc->testing_memory;
	copy->testing_memory_size = mc->testing_memory_size;
#endif
	return copy;
}
/** create event base
 *
 * Allocates and initialises a select()-based mini event base: caches the
 * caller's time pointers, creates the timeout tree, and sizes the fd and
 * signal tables.  Returns NULL on any failure, after releasing partial
 * state via event_base_free().
 */
void *event_init(uint32_t* time_secs, struct timeval* time_tv)
{
	struct event_base* base = (struct event_base*)malloc(
		sizeof(struct event_base));
	if(!base)
		return NULL;
	memset(base, 0, sizeof(*base));
	/* shared time pointers, updated by settime() */
	base->time_secs = time_secs;
	base->time_tv = time_tv;
	if(settime(base) < 0) {
		event_base_free(base);
		return NULL;
	}
	/* timeouts ordered by expiry via mini_ev_cmp */
	base->times = rbtree_create(mini_ev_cmp);
	if(!base->times) {
		event_base_free(base);
		return NULL;
	}
	base->capfd = MAX_FDS;
#ifdef FD_SETSIZE
	/* never exceed what select()'s fd_set can represent */
	if((int)FD_SETSIZE < base->capfd)
		base->capfd = (int)FD_SETSIZE;
#endif
	base->fds = (struct event**)calloc((size_t)base->capfd,
		sizeof(struct event*));
	if(!base->fds) {
		event_base_free(base);
		return NULL;
	}
	base->signals = (struct event**)calloc(MAX_SIG, sizeof(struct event*));
	if(!base->signals) {
		event_base_free(base);
		return NULL;
	}
#ifndef S_SPLINT_S
	FD_ZERO(&base->reads);
	FD_ZERO(&base->writes);
#endif
	return base;
}
/*
 *	Do any per-module initialization that is separate to each
 *	configured instance of the module. e.g. set up connections
 *	to external databases, read configuration files, set up
 *	dictionary entries, etc.
 *
 *	If configuration information is given in the config section
 *	that must be referenced in later calls, store a handle to it
 *	in *instance otherwise put a null pointer there.
 *
 *	Returns 0 on success, -1 on failure (instance released via detach).
 */
static int policy_instantiate(CONF_SECTION *conf, void **instance)
{
	rlm_policy_t *inst;

	/*
	 *	Set up a storage area for instance data.
	 */
	inst = rad_malloc(sizeof(*inst));
	if (!inst) return -1;
	memset(inst, 0, sizeof(*inst));

	/*
	 *	If the configuration parameters can't be parsed, then fail.
	 */
	if (cf_section_parse(conf, inst, module_config) < 0) goto fail;

	/* Policies are owned by the tree (rlm_policy_free_item). */
	inst->policies = rbtree_create(policyname_cmp, rlm_policy_free_item, 0);
	if (!inst->policies) goto fail;

	/*
	 *	Parse the policy from the file.
	 */
	if (!rlm_policy_parse(inst->policies, inst->filename)) goto fail;

	*instance = inst;
	return 0;

fail:
	policy_detach(inst);
	return -1;
}
name_tree_t * name_tree_create() { name_tree_t *tree = calloc(1, sizeof(name_tree_t)); if (!tree) return NULL; tree->_tree = rbtree_create(name_compare); if (!tree->_tree) goto MEM_ALLOC_FAILED; tree->heap = heap_init(1000, elem_compare); if (!tree->heap) goto MEM_ALLOC_FAILED; tree->node_pool = mem_pool_create(sizeof(rbnode_t), 1024, true); if (!tree->node_pool) goto MEM_ALLOC_FAILED; tree->name_pool = mem_pool_create(sizeof(node_value_t), 1024, true); if (!tree->name_pool) goto MEM_ALLOC_FAILED; tree->lru = lru_list_create(); if (!tree->lru) goto MEM_ALLOC_FAILED; tree->count = 0; return tree; MEM_ALLOC_FAILED : if (tree->name_pool) mem_pool_delete(tree->name_pool); if (tree->node_pool) mem_pool_delete(tree->node_pool); if (tree->_tree) free(tree->_tree); if (tree->heap) heap_destory(tree->heap); if (tree->lru) lru_list_destroy(tree->lru); free(tree); }