/*
 * Free an itemset and everything it transitively owns: the payload of every
 * entry in the `items` hash, the `items` hash itself, the per-key lists stored
 * in `ready_for`, the `ready_for` hash, the `complete` list, and finally the
 * itemset struct. After this call `this_itemset` is dangling.
 */
void destroy_itemset( struct itemset * this_itemset )
{
	int i;
	char * key;
	struct list * key_list = new_list();

	/* free the item payload behind every key in the items hash
	 * (destroy_hash below only tears down the hash structure itself) */
	list_keys_in_hash( this_itemset->items, key_list, "" );
	for ( i = 0; i < key_list->next_index; i++ ) {
		key = listlookup( key_list, i );
		struct item * data = hashlookup( this_itemset->items, key )->data;
		free( data );
	}
	destroy_key_list( key_list );
	destroy_hash( this_itemset->items );

	/* each ready_for entry's data is a list that must be destroyed, not free()d */
	key_list = new_list();
	list_keys_in_hash( this_itemset->ready_for, key_list, "" );
	for ( i = 0; i < key_list->next_index; i++ ) {
		key = listlookup( key_list, i );
		struct list * ready_items = hashlookup( this_itemset->ready_for, key )->data;
		destroy_list( ready_items );
	}
	destroy_key_list( key_list );
	destroy_hash( this_itemset->ready_for );

	destroy_list( this_itemset->complete );
	free( this_itemset );
	return;
}
/*
 * Release the module's shared-memory rule data: both halves of the
 * double-buffered hash plus the index bookkeeping. Safe to call when
 * nothing was ever allocated.
 */
void destroy_data(void)
{
	if (rules_hash) {
		/* tear down both hash table indexes before the container */
		destroy_hash(0);
		destroy_hash(1);
		shm_free(rules_hash);
		rules_hash = 0;
	}

	if (crt_idx)
		shm_free(crt_idx);
}
void destroy_data(void) { dp_connection_list_t *el, *next; LM_DBG("Destroying data\n"); for (el = dp_conns; el && (next = el->next, 1); el = next) { destroy_hash(&el->hash[0]); destroy_hash(&el->hash[1]); lock_destroy_rw(el->ref_lock); shm_free(el); } }
/*
 * Resolve a list of label strings to their tree nodes.
 * Labels that do not occur in the tree are reported on stderr and skipped.
 * Returns a newly allocated list of struct rnode* (caller frees the list).
 * Exits the program on allocation failure.
 */
struct llist * get_outgroup_nodes(struct rooted_tree *tree, struct llist *labels)
{
	struct hash *label2node = create_label2node_map(tree->nodes_in_order);

	struct llist *result = create_llist();
	if (NULL == result) {
		perror(NULL);
		exit(EXIT_FAILURE);
	}

	struct list_elem *cur;
	for (cur = labels->head; cur != NULL; cur = cur->next) {
		char *lbl = (char *) cur->data;
		struct rnode *node = hash_get(label2node, lbl);
		if (node == NULL) {
			/* unknown label: warn and keep going */
			fprintf (stderr, "WARNING: label '%s' does not occur in tree\n", lbl);
			continue;
		}
		if (! append_element(result, node)) {
			perror(NULL);
			exit(EXIT_FAILURE);
		}
	}

	destroy_hash(label2node);
	return result;
}
int main(int argc, char *argv[]) { struct rooted_tree *tree; struct hash *rename_map; struct parameters params; params = get_params(argc, argv); rename_map = read_map(params.map_filename); while (NULL != (tree = parse_tree())) { process_tree(tree, rename_map, params); destroy_tree(tree, DONT_FREE_NODE_DATA); } struct llist *keys = hash_keys(rename_map); if (NULL == keys) { perror(NULL); exit(EXIT_FAILURE); } struct list_elem *e; for (e = keys->head; NULL != e; e = e->next) { char *key = (char *) e->data; char *val = hash_get(rename_map, key); free(val); } destroy_llist(keys); destroy_hash(rename_map); return 0; }
/*
 * Tear down the global multicast group table: destroys the groups hash,
 * frees the table struct, and resets the global pointer so a later
 * re-initialization starts clean. Must only be called after the table
 * was created (asserted). Always returns true.
 */
static bool finalize_multicast_group_table() {
	assert( multicast_groups != NULL );
	destroy_hash( &multicast_groups->groups );
	free( multicast_groups );
	multicast_groups = NULL;
	return true;
}
/*
 * Dispose of a key list produced by list_keys_in_hash(): frees every
 * heap-allocated key string it holds, the hash behind its wrapper,
 * the wrapper struct, and finally the list itself.
 */
void destroy_key_list( struct list * key_list )
{
	int i;

	/* release key strings from the back of the list toward the front */
	for ( i = key_list->next_index - 1; i >= 0; i-- ) {
		free( listlookup( key_list, i ) );
	}

	destroy_hash( key_list->main_hash->main_hash );
	free( key_list->main_hash );
	free( key_list );
}
/*
 * Destroy a client handle: clears the caller's pointer first (so no one
 * can reuse it), then closes both descriptors, tears down the inotify
 * hash, and frees the struct. A NULL handle is a no-op.
 */
void csiebox_client_destroy(csiebox_client** client) {
  csiebox_client* self = *client;
  *client = 0;
  if (self == NULL) {
    return;
  }
  close(self->conn_fd);
  close(self->inotify_fd);
  destroy_hash(&(self->inotify_hash));
  free(self);
}
/*
 * Recursively tear down a hash trie node: destroys every child hash
 * reachable through the char-indexed child table, then frees this node's
 * key-character buffer and signature string and destroys its child table.
 *
 * NOTE(review): `current` itself is never free()d here, and the visible
 * callers (destroy_itemset, destroy_key_list) do not free it either —
 * looks like a leak of the struct hash, but the allocation convention is
 * not visible from this file. Confirm ownership before changing.
 */
void destroy_hash( struct hash * current )
{
	char * child_chars = chars_in( current->child, 0, 0 );
	int i = current->num_children;
	struct hash * child_hash;

	/* depth-first destruction of every child hash */
	while ( i-- ) {
		child_hash = charhashlookup( current->child, child_chars[i] )->data;
		destroy_hash( child_hash );
	}

	free(child_chars);
	free(current->sigstr);
	destroy_charhash(current->child);
	return;
}
struct llist *get_ingroup_leaves(struct rooted_tree *tree, struct llist *excluded_labels) { struct llist *result = create_llist(); if (NULL == result) { perror(NULL); exit(EXIT_FAILURE); } struct hash *excluded_lbl_hash = create_hash(excluded_labels->count); if (NULL == excluded_lbl_hash) { perror(NULL); exit(EXIT_FAILURE); } struct list_elem *el; /* Make a hash with all excluded labels. */ for (el = excluded_labels->head; NULL != el; el = el->next) { if (! hash_set(excluded_lbl_hash, (char *) el->data, (void *) "member")) { perror(NULL); exit(EXIT_FAILURE); } } /* add nodes to result iff i) node is a leaf, ii) node's label is not * among 'excluded_lbl_hash' */ for (el = tree->nodes_in_order->head; NULL != el; el = el->next) { struct rnode *current = (struct rnode *) el->data; if (is_leaf(current)) { bool add = false; if (strcmp ("", current->label) == 0) add = true; else { if (NULL == hash_get(excluded_lbl_hash, current->label)) add = true; } if (add) { if (! append_element(result, current)) { perror(NULL); exit(EXIT_FAILURE); } } } } destroy_hash(excluded_lbl_hash); return result; }
static void create_hash() /* * Create a hash table by looking up the triggers for all of the * MIDI hooks */ { if(hash) /* Already exists? */ destroy_hash(); /* Destroy it */ hash = g_tree_new((GCompareFunc)hashcmp); /* * Go through the list of MIDI hooks and for each, connect * to the trigger according to the path given */ for_each_si(midihooks, (t_for_si)assign_trg, NULL); } /* create_hash() */
/*
 * Allocate and initialise a client handle: parse arguments, connect to
 * the server, set up inotify and the inotify hash, and resolve the sync
 * root. On any failure the partially built handle is released and
 * *client is left untouched.
 *
 * BUG FIX: the init_hash() failure branch freed `tmp` but was missing a
 * `return`, so execution fell through to memset(tmp->root, ...) and
 * `*client = tmp` — a use-after-free that also published a dangling
 * pointer to the caller.
 */
void csiebox_client_init(
  csiebox_client** client, int argc, char** argv) {
  csiebox_client* tmp = (csiebox_client*)malloc(sizeof(csiebox_client));
  if (!tmp) {
    fprintf(stderr, "client malloc fail\n");
    return;
  }
  memset(tmp, 0, sizeof(csiebox_client));
  if (!parse_arg(tmp, argc, argv)) {
    fprintf(stderr, "Usage: %s [config file]\n", argv[0]);
    free(tmp);
    return;
  }
  int fd = client_start(tmp->arg.name, tmp->arg.server);
  if (fd < 0) {
    fprintf(stderr, "connect fail\n");
    free(tmp);
    return;
  }
  tmp->conn_fd = fd;
  fd = inotify_init();
  if (fd < 0) {
    fprintf(stderr, "inotify fail\n");
    close(tmp->conn_fd);
    free(tmp);
    return;
  }
  tmp->inotify_fd = fd;
  if (!init_hash(&(tmp->inotify_hash), 100)) {
    destroy_hash(&(tmp->inotify_hash));
    fprintf(stderr, "hash fail\n");
    close(tmp->conn_fd);
    close(tmp->inotify_fd);
    free(tmp);
    return;  /* was missing: without it, freed tmp was dereferenced below */
  }
  memset(tmp->root, 0, PATH_MAX);
  /* NOTE(review): realpath() can fail and leave tmp->root empty —
   * confirm callers tolerate an unset root */
  realpath(tmp->arg.path, tmp->root);
  *client = tmp;
}
// TODO: // o this f() should be a library f() // o it should return codes for: yes, no, error int is_monophyletic(struct llist *descendants, struct rnode *subtree_root) { int result = TRUE; struct hash *leaf_map = get_leaf_label_map_from_node(subtree_root); if (NULL == leaf_map) { perror(NULL); exit(EXIT_FAILURE); } if (leaf_map->count != descendants->count) { result = FALSE; } struct list_elem *el; for (el = descendants->head; NULL != el; el = el->next) { char *label = ((struct rnode *) el->data)->label; if (NULL == hash_get(leaf_map, label)) { result = FALSE; break; } } destroy_hash(leaf_map); return result; }
//Resize hash_table by a scale factor, if there are more than hash_table->load_factor*hash_table->current_size items in the hash_table void resize_hash_table(hash_table_t hash_table){ uint32_t old_buckets_num = hash_table->buckets_num; item_t * old_hash = hash_table->hash; hash_table->buckets_num = old_buckets_num * hash_table->scale_factor; hash_table->hash = (item_t *) malloc((hash_table->buckets_num) * sizeof(item_t)); assert(hash_table->hash!=NULL&&"hash_table fails to allocate"); // Initialize all hash entries as empty in the new hash_table for(uint32_t i = 0; i < hash_table->buckets_num; ++i){ hash_table->hash[i] = NULL; } //copy all the buckets from the original hash table to the new hash table for(uint32_t i = 0; i < old_buckets_num; ++i){ for (item_t p = old_hash[i];p!=NULL;p=p->next){ hash_table_set(hash_table, p->key,p->val,p->val_size); } } //destroy the old hash_table destroy_hash(old_hash,old_buckets_num); free(old_hash); }
/*load rules from DB*/
/*
 * Reload the dialplan rules into the currently inactive hash index
 * (double buffering via *crt_idx / *next_idx), then switch readers to it.
 * Supports chunked fetching when the DB driver has DB_CAP_FETCH.
 * Returns 0 on success or when a reload is already in flight, -1 on error.
 */
int dp_load_db(void)
{
	int i, nr_rows;
	db1_res_t * res = 0;
	db_val_t * values;
	db_row_t * rows;
	db_key_t query_cols[DP_TABLE_COL_NO] = {
		&dpid_column, &pr_column, &match_op_column, &match_exp_column,
		&match_len_column, &subst_exp_column, &repl_exp_column,
		&attrs_column };
	db_key_t order = &pr_column;	/* rows come back sorted by priority */
	dpl_node_t *rule;

	LM_DBG("init\n");

	/* indexes differ only while another reload is still in progress */
	if( (*crt_idx) != (*next_idx)){
		LM_WARN("a load command already generated, aborting reload...\n");
		return 0;
	}

	if (dp_dbf.use_table(dp_db_handle, &dp_table_name) < 0){
		LM_ERR("error in use_table %.*s\n", dp_table_name.len, dp_table_name.s);
		return -1;
	}

	if (DB_CAPABILITY(dp_dbf, DB_CAP_FETCH)) {
		/* incremental fetch: dp_fetch_rows rows at a time */
		if(dp_dbf.query(dp_db_handle,0,0,0,query_cols, 0,
				DP_TABLE_COL_NO, order, 0) < 0){
			LM_ERR("failed to query database!\n");
			return -1;
		}
		if(dp_dbf.fetch_result(dp_db_handle, &res, dp_fetch_rows)<0) {
			LM_ERR("failed to fetch\n");
			if (res)
				dp_dbf.free_result(dp_db_handle, res);
			return -1;
		}
	} else {
		/*select the whole table and all the columns*/
		if(dp_dbf.query(dp_db_handle,0,0,0,query_cols, 0,
				DP_TABLE_COL_NO, order, &res) < 0){
			LM_ERR("failed to query database\n");
			return -1;
		}
	}

	nr_rows = RES_ROW_N(res);

	/* build into the unused index; clear whatever it held before */
	*next_idx = ((*crt_idx) == 0)? 1:0;
	destroy_hash(*next_idx);

	if(nr_rows == 0){
		LM_WARN("no data in the db\n");
		goto end;
	}

	do {
		for(i=0; i<RES_ROW_N(res); i++){
			rows = RES_ROWS(res);
			values = ROW_VALUES(rows+i);

			/* any broken row aborts the whole reload */
			if((rule = build_rule(values)) ==0 )
				goto err2;

			if(add_rule2hash(rule , *next_idx) != 0)
				goto err2;
		}
		if (DB_CAPABILITY(dp_dbf, DB_CAP_FETCH)) {
			if(dp_dbf.fetch_result(dp_db_handle, &res, dp_fetch_rows)<0) {
				LM_ERR("failure while fetching!\n");
				if (res)
					dp_dbf.free_result(dp_db_handle, res);
				return -1;
			}
		} else {
			break;
		}
	} while(RES_ROW_N(res)>0);

end:
	/*update data*/
	*crt_idx = *next_idx;
	list_hash(*crt_idx);
	dp_dbf.free_result(dp_db_handle, res);
	return 0;

err2:
	/* roll back: drop the half-built hash and restore the index pair */
	if(rule) destroy_rule(rule);
	destroy_hash(*next_idx);
	dp_dbf.free_result(dp_db_handle, res);
	*next_idx = *crt_idx;
	return -1;
}
/*
 * Emit code for one function by walking its control-flow graph.
 * Uses a work queue plus a `done` visited-set; within an iteration it
 * follows fall-through successors directly (goto do_next) and only emits
 * an explicit jump when the successor was already emitted. Labels are
 * emitted for any vertex reachable by something other than straight-line
 * fall-through from the previously emitted vertex. Always returns 1.
 */
int emit_function(FUNCTION *func, EMIT_FUNCTIONS *functions, void *data)
{
	GRAPH *graph = func->graph;
	QUEUE *queue = create_queue();
	HASH *done = create_hash(10, key_type_direct);	/* visited vertices */

	queue_push(queue, tree_get_child(graph, 0));

	NODE *last = NULL;	/* vertex emitted immediately before the current one */
	while (!queue_is_empty(queue))
	{
		NODE *vertex = queue_pop(queue);
		if (find_in_hash(done, vertex, sizeof(void *)))
			continue;
do_next:
		add_to_hash(done, vertex, sizeof(void *), (void *) 1);
		int label = (int) get_from_hash(graph->labels, vertex, sizeof(void *));
		HASH *predecessor_hash = get_from_hash(graph->backward, vertex, sizeof(void *));
		HASH_ITERATOR iter;
		hash_iterator(predecessor_hash, &iter);
		/* label needed when >1 predecessor, or the single predecessor is
		 * not the vertex we just fell through from */
		if (predecessor_hash && (predecessor_hash->num > 1
				|| (predecessor_hash->num == 1 && last != iter.entry->key)))
		{
			functions->emit_label(label, data);
		}
		functions->emit_comment(vertex, data);

		HASH *successor_hash = get_from_hash(graph->forward, vertex, sizeof(void *));
		NODE *successor;
		int successor_label;
		if (successor_hash)
		{
			hash_iterator(successor_hash, &iter);
			successor = iter.entry->key;
			successor_label = (int) get_from_hash(graph->labels, successor, sizeof(void *));
		}
		else
			successor = NULL;

		/* dispatch on statement type */
		if (tree_is_type(vertex, STMT_ENTER))
		{
			functions->emit_enter(vertex, data);
		}
		else if (tree_is_type(vertex, STMT_EXIT))
		{
			functions->emit_exit(vertex, data);
			last = vertex;
			continue;	/* exit has no successor to follow */
		}
		else if (tree_is_type(vertex, STMT_ASSIGN))
			functions->emit_assign(vertex, data);
		else if (tree_is_type(vertex, STMT_RETURN))
			functions->emit_return(vertex, data);
		else if (tree_is_type(vertex, STMT_TEST))
		{
			/* second forward edge is the branch target */
			hash_iterator_next(&iter);
			NODE *branch = iter.entry->key;
			EDGE_TYPE branch_type = (EDGE_TYPE) iter.entry->data;
			int branch_label = (int) get_from_hash(graph->labels, branch, sizeof(void *));
			functions->emit_test(vertex, branch_type, branch_label, data);
			if (!find_in_hash(done, branch, sizeof(void *)))
				queue_push(queue, branch);
			/* Force label on next vertex, in case we jumped to it in the
			   test's branch. Fixes a bug where the label is omitted just
			   because the test was before it, by neglecting to notice
			   that the test reaches it by a jump. */
			vertex = NULL;
		}

		last = vertex;
		/* successor already emitted: jump to it instead of re-emitting */
		if (find_in_hash(done, successor, sizeof(void *)))
		{
			functions->emit_jump(successor_label, data);
			continue;
		}

		vertex = successor;
		if (vertex)
			goto do_next;	/* fall through to the successor */
	}

	functions->emit_end(data);
	destroy_queue(queue);
	destroy_hash(done);
	return 1;
}
//Destroy a hash table void destroy_hash_table(hash_table_t hash_table){ destroy_hash(hash_table->hash, hash_table->buckets_num); free(hash_table->hash); hash_table->buckets_num = 0; hash_table->current_size = 0; }
/*load rules from DB*/
/*
 * Reload the dialplan rules from the database into the inactive hash index,
 * holding the ref_lock writer lock while the hash pair is switched.
 * Broken rows are skipped with a warning rather than aborting the reload.
 * Returns 0 on success or when a reload is already in flight, -1 on error.
 */
int dp_load_db(void)
{
	int i, nr_rows;
	db_res_t * res = 0;
	db_val_t * values;
	db_row_t * rows;
	db_key_t query_cols[DP_TABLE_COL_NO] = {
		&dpid_column, &pr_column, &match_op_column, &match_exp_column,
		&match_len_column, &subst_exp_column, &repl_exp_column,
		&attrs_column };
	db_key_t order = &pr_column;	/* rows come back sorted by priority */
	dpl_node_t *rule;
	int no_rows = 10;	/* fetch chunk size (rows per fetch_result call) */

	/* indexes differ only while another reload is still in progress */
	if( (*crt_idx) != (*next_idx)){
		LM_WARN("a load command already generated, aborting reload...\n");
		return 0;
	}

	if (dp_dbf.use_table(dp_db_handle, &dp_table_name) < 0){
		LM_ERR("error in use_table\n");
		return -1;
	}

	if (DB_CAPABILITY(dp_dbf, DB_CAP_FETCH)) {
		if(dp_dbf.query(dp_db_handle,0,0,0,query_cols, 0,
				DP_TABLE_COL_NO, order, 0) < 0){
			LM_ERR("failed to query database!\n");
			return -1;
		}
		/* size the fetch chunk from an estimated per-row byte width */
		no_rows = estimate_available_rows( 4+4+4+64+4+64+64+128,
			DP_TABLE_COL_NO);
		if (no_rows==0) no_rows = 10;
		if(dp_dbf.fetch_result(dp_db_handle, &res, no_rows)<0) {
			LM_ERR("failed to fetch\n");
			if (res)
				dp_dbf.free_result(dp_db_handle, res);
			return -1;
		}
	} else {
		/*select the whole table and all the columns*/
		if(dp_dbf.query(dp_db_handle,0,0,0,query_cols, 0,
				DP_TABLE_COL_NO, order, &res) < 0){
			LM_ERR("failed to query database\n");
			return -1;
		}
	}

	nr_rows = RES_ROW_N(res);

	/* exclusive writer access while the standby hash is rebuilt */
	lock_start_write( ref_lock );

	*next_idx = ((*crt_idx) == 0)? 1:0;

	if(nr_rows == 0){
		LM_WARN("no data in the db\n");
		goto end;
	}

	do {
		for(i=0; i<RES_ROW_N(res); i++){
			rows = RES_ROWS(res);
			values = ROW_VALUES(rows+i);

			/* malformed rows are skipped, not fatal */
			if ((rule = build_rule(values)) == NULL ) {
				LM_WARN(" failed to build rule -> skipping\n");
				continue;
			}

			if(add_rule2hash(rule , *next_idx) != 0) {
				LM_ERR("add_rule2hash failed\n");
				goto err2;
			}
		}
		if (DB_CAPABILITY(dp_dbf, DB_CAP_FETCH)) {
			if(dp_dbf.fetch_result(dp_db_handle, &res, no_rows)<0) {
				LM_ERR("failure while fetching!\n");
				if (res)
					dp_dbf.free_result(dp_db_handle, res);
				lock_stop_write( ref_lock );
				return -1;
			}
		} else {
			break;
		}
	} while(RES_ROW_N(res)>0);

end:
	/* retire the old hash and publish the freshly built one */
	destroy_hash(*crt_idx);
	/*update data*/
	*crt_idx = *next_idx;
	/* release the exclusive writing access */
	lock_stop_write( ref_lock );
	list_hash(*crt_idx);

	dp_dbf.free_result(dp_db_handle, res);
	return 0;

err2:
	/* roll back: drop the half-built hash and restore the index pair */
	if(rule) destroy_rule(rule);
	destroy_hash(*next_idx);
	dp_dbf.free_result(dp_db_handle, res);
	*next_idx = *crt_idx;
	/* if lock defined - release the exclusive writing access */
	if(ref_lock)
		/* release the readers */
		lock_stop_write( ref_lock );
	return -1;
}
// entry function to pack data // returns zero if any error ULONG pack(void) { ULONG (*get_lz_price)(OFFSET position, struct lzcode * lzcode) = NULL; // generates correct bitlen (price) of code ULONG (*emit)(struct optchain * optch, ULONG actual_len) = NULL; // emits lzcode to the output bit/byte stream ULONG success=1; ULONG actual_len; // actual length of packing (to account for ZX headers containing last unpacked bytes) UBYTE * hash; struct optchain * optch=NULL; static struct lzcode codes[MAX_CODES_SIZE]; // generate codes here; static to ensure it's not on the stack UBYTE curr_byte, last_byte; UWORD index; OFFSET position; // some preparations // if( wrk.packtype==PK_MLZ ) { get_lz_price = &get_lz_price_megalz; emit = &emit_megalz; } else if( wrk.packtype==PK_HRM ) { get_lz_price = &get_lz_price_hrum; emit = &emit_hrum; } else if( wrk.packtype==PK_HST ) { get_lz_price = &get_lz_price_hrust; emit = &emit_hrust; } else { printf("mhmt-pack.c:pack() - format unsupported!\n"); return 0; } actual_len = wrk.inlen; if( wrk.zxheader ) { if( wrk.packtype==PK_HRM ) { actual_len -= 5; } else if( wrk.packtype==PK_HST ) { actual_len -= 6; } else { printf("mhmt-pack.c:pack() - there must be no zxheader for anything except hrust or hrum!\n"); return 0; } } // initializations and preparations init_tb(); hash = build_hash(wrk.indata, actual_len, wrk.prelen); if( !hash ) { printf("mhmt-pack.c:pack() - build_hash() failed!\n"); success = 0; } if( success ) { optch = make_optch(actual_len); if( !optch ) { printf("mhmt-pack.c:pack() - can't make optchain array!\n"); success = 0; } } // go packing! 
if( success ) { // fill TBs with prebinary date if( wrk.prebin ) { curr_byte=wrk.indata[0LL-wrk.prelen]; // for(position=(1LL-wrk.prelen);position<=0;position++) { last_byte = curr_byte; curr_byte = wrk.indata[position]; index = (last_byte<<8) + curr_byte; if( !add_tb(index,position) ) { printf("mhmt-pack.c:pack() - add_tb() failed!\n"); success = 0; goto ERROR; } } } if( !wrk.greedy ) // default optimal coding { // go generating lzcodes byte-by-byte // curr_byte = wrk.indata[0]; // for(position=1;position<actual_len;position++) { last_byte = curr_byte; curr_byte = wrk.indata[position]; // add current two-byter to the chains index = (last_byte<<8) + curr_byte; if( !add_tb(index,position) ) { printf("mhmt-pack.c:pack() - add_tb() failed!\n"); success = 0; goto ERROR; } // search lzcodes for given position make_lz_codes(position, actual_len, hash, codes); // update optimal chain with lzcodes update_optch(position, codes, get_lz_price, optch); } // all input bytes scanned, chain built, so now reverse it (prepare for scanning in output generation part) reverse_optch(optch, actual_len); } else // greedy coding { printf("mhmt-pack.c:pack() - greedy coding not supported!\n"); success = 0; } // data built, now emit packed file success = success && (*emit)(optch, actual_len); } ERROR: free_optch(optch); destroy_hash(hash, wrk.prelen); return success; }
/*
 * Shut down MIDI hook processing by discarding the trigger hash
 * built by create_hash().
 */
void stop_midi_hooks()
{
	destroy_hash();
} /* stop_midi_hooks() */
/*load rules from DB*/
/*
 * Reload the rules of one dialplan DB connection into its inactive hash
 * half (only rows with disabled == 0), then switch readers over under the
 * connection's ref_lock. The index switch is announced up front and rolled
 * back on every error path. Broken rows are skipped with a warning.
 * Returns 0 on success or when a reload is already in flight, -1 on error.
 */
int dp_load_db(dp_connection_list_p dp_conn)
{
	int i, nr_rows;
	db_res_t * res = 0;
	db_val_t * values;
	db_row_t * rows;

	db_key_t query_cols[DP_TABLE_COL_NO] = {
		&dpid_column, &pr_column, &match_op_column, &match_exp_column,
		&match_flags_column, &subst_exp_column, &repl_exp_column,
		&attrs_column, &timerec_column };
	db_key_t order = &pr_column;	/* rows come back sorted by priority */
	/* disabled condition */
	db_key_t cond_cols[1] = { &disabled_column };
	db_val_t cond_val[1];

	dpl_node_t *rule;
	int no_rows = 10;	/* fetch chunk size (rows per fetch_result call) */

	/* claim the reload: indexes differ only while one is in progress */
	lock_start_write( dp_conn->ref_lock );
	if( dp_conn->crt_index != dp_conn->next_index){
		LM_WARN("a load command already generated, aborting reload...\n");
		lock_stop_write( dp_conn->ref_lock );
		return 0;
	}
	dp_conn->next_index = dp_conn->crt_index == 0 ? 1 : 0;
	lock_stop_write( dp_conn->ref_lock );

	if (dp_conn->dp_dbf.use_table(*dp_conn->dp_db_handle, &dp_conn->table_name) < 0){
		LM_ERR("error in use_table\n");
		goto err1;
	}

	/* select only rows whose `disabled` column is 0 */
	VAL_TYPE(cond_val) = DB_INT;
	VAL_NULL(cond_val) = 0;
	VAL_INT(cond_val) = 0;

	if (DB_CAPABILITY(dp_conn->dp_dbf, DB_CAP_FETCH)) {
		if(dp_conn->dp_dbf.query(*dp_conn->dp_db_handle,cond_cols,
				0,cond_val,query_cols,1, DP_TABLE_COL_NO, order, 0) < 0){
			LM_ERR("failed to query database!\n");
			goto err1;
		}
		/* size the fetch chunk from an estimated per-row byte width */
		no_rows = estimate_available_rows( 4+4+4+64+4+64+64+128,
			DP_TABLE_COL_NO);
		if (no_rows==0) no_rows = 10;
		if(dp_conn->dp_dbf.fetch_result(*dp_conn->dp_db_handle, &res, no_rows)<0) {
			LM_ERR("failed to fetch\n");
			if (res)
				dp_conn->dp_dbf.free_result(*dp_conn->dp_db_handle, res);
			goto err1;
		}
	} else {
		/*select the whole table and all the columns*/
		if(dp_conn->dp_dbf.query(*dp_conn->dp_db_handle, cond_cols,0,cond_val,
				query_cols,1, DP_TABLE_COL_NO, order, &res) < 0){
			LM_ERR("failed to query database\n");
			goto err1;
		}
	}

	nr_rows = RES_ROW_N(res);
	if(nr_rows == 0){
		LM_WARN("no data in the db\n");
		goto end;
	}

	do {
		for(i=0; i<RES_ROW_N(res); i++){
			rows = RES_ROWS(res);
			values = ROW_VALUES(rows+i);

			/* malformed rows are skipped, not fatal */
			if ((rule = build_rule(values)) == NULL) {
				LM_WARN(" failed to build rule -> skipping\n");
				continue;
			}

			rule->table_id = i;

			if(add_rule2hash(rule , dp_conn, dp_conn->next_index) != 0) {
				LM_ERR("add_rule2hash failed\n");
				goto err2;
			}
		}
		if (DB_CAPABILITY(dp_conn->dp_dbf, DB_CAP_FETCH)) {
			if(dp_conn->dp_dbf.fetch_result(*dp_conn->dp_db_handle, &res, no_rows)<0) {
				LM_ERR("failure while fetching!\n");
				if (res)
					dp_conn->dp_dbf.free_result(*dp_conn->dp_db_handle, res);
				goto err1;
			}
		} else {
			break;
		}
	} while(RES_ROW_N(res)>0);

end:
	/*update data*/
	/* retire the old half and publish the freshly built one */
	lock_start_write( dp_conn->ref_lock );
	destroy_hash(&dp_conn->hash[dp_conn->crt_index]);
	dp_conn->crt_index = dp_conn->next_index;
	lock_stop_write( dp_conn->ref_lock );

	list_hash(dp_conn->hash[dp_conn->crt_index], dp_conn->ref_lock);

	dp_conn->dp_dbf.free_result(*dp_conn->dp_db_handle, res);
	return 0;

err1:
	/* roll back the index switch announced at the top */
	lock_start_write( dp_conn->ref_lock );
	dp_conn->next_index = dp_conn->crt_index;
	lock_stop_write( dp_conn->ref_lock );
	return -1;

err2:
	/* roll back: drop the half-built hash and restore the index pair */
	if(rule) destroy_rule(rule);
	destroy_hash(&dp_conn->hash[dp_conn->next_index]);
	dp_conn->dp_dbf.free_result(*dp_conn->dp_db_handle, res);
	lock_start_write( dp_conn->ref_lock );
	dp_conn->next_index = dp_conn->crt_index;
	/* if lock defined - release the exclusive writing access */
	lock_stop_write( dp_conn->ref_lock );
	return -1;
}