/**
 * Return true iff @hash1 and @hash2 contain exactly the same set of keys.
 * Keys are compared after sorting, so insertion order is irrelevant.
 */
bool hash_key_list_compare(hash_type * hash1, hash_type * hash2) {
  const int size1 = hash_get_size(hash1);
  const int size2 = hash_get_size(hash2);

  /* Different key counts - can not be equal; skip the allocations entirely. */
  if (size1 != size2)
    return false;

  char ** keys1 = hash_alloc_key_sorted_list(hash1, &key_cmp);
  char ** keys2 = hash_alloc_key_sorted_list(hash2, &key_cmp);

  bool equal = true;
  for (int i = 0; i < size1; i++) {
    if (strcmp(keys1[i], keys2[i]) != 0) {
      equal = false;
      break;
    }
  }

  /* Both key lists (and every string in them) are owned here - free them
     regardless of the comparison outcome. */
  for (int i = 0; i < size1; i++) {
    free(keys1[i]);
    free(keys2[i]);
  }
  free(keys1);
  free(keys2);

  return equal;
}
/**
 * Allocate a hash_sort_type vector with one (copied) key per hash entry.
 * The key strings from @keylist are deep-copied; the caller releases the
 * result with hash_free_sort_list().
 *
 * NOTE(review): the calloc() result is used unchecked, as in the rest of
 * this module - assumed abort-on-OOM policy elsewhere; confirm.
 */
static hash_sort_type * hash_alloc_sort_list(const hash_type *hash , const char **keylist) {
  const int size = hash_get_size(hash);   /* hoisted: loop invariant, was re-evaluated every iteration */
  hash_sort_type * sort_list = calloc(size , sizeof * sort_list);
  for (int i = 0; i < size; i++)
    sort_list[i].key = util_alloc_string_copy(keylist[i]);
  return sort_list;
}
/** Thread-safe size query: reads the key set under the read lock. */
int summary_key_set_get_size(summary_key_set_type * set) {
  pthread_rwlock_rdlock( &set->rw_lock );
  const int size = hash_get_size( set->key_set );
  pthread_rwlock_unlock( &set->rw_lock );
  return size;
}
//get the size of the specified type dictionary. FRISO_API uint_t friso_spec_dic_size( friso_dic_t dic, friso_lex_t lex ) { if ( lex >= 0 && lex < __FRISO_LEXICON_LENGTH__ ) { return hash_get_size( dic[lex] ); } return 0; }
/**
 * Allocate an iterator over the keys of @hash and position it at the start.
 * The key count and key list are snapshotted at allocation time.
 */
hash_iter_type * hash_iter_alloc(const hash_type * hash) {
  hash_iter_type * iter = util_malloc(sizeof * iter);

  iter->hash     = hash;
  iter->num_keys = hash_get_size(hash);
  /* hash_alloc_keylist() takes a non-const hash pointer - cast away const. */
  iter->keylist  = hash_alloc_keylist( (hash_type *) hash );

  hash_iter_restart( iter );
  return iter;
}
/**
 * Build a stringlist from the keys of @hash. The key copies returned by
 * hash_alloc_keylist__() are handed over to the stringlist as owned
 * references, so only the outer keylist vector is freed here.
 */
stringlist_type * hash_alloc_stringlist(hash_type * hash) {
  stringlist_type * stringlist = stringlist_alloc_new();
  char ** keylist = hash_alloc_keylist__(hash , true);
  const int size = hash_get_size( hash );   /* hoisted: was re-evaluated in the loop condition */
  for (int i = 0; i < size; i++)
    stringlist_append_owned_ref( stringlist , keylist[i] );

  free( keylist );
  return stringlist;
}
//get size of the whole dictionary. FRISO_API uint_t friso_all_dic_size( friso_dic_t dic ) { register uint_t size = 0, t; for ( t = 0; t < __FRISO_LEXICON_LENGTH__; t++ ) { size += hash_get_size( dic[t] ); } return size; }
/**
 * Allocate parallel child/parent name lists from the gruptree hash.
 * parents[i] is a copy of the parent stored under key children[i].
 * Ownership of both lists (and all their strings) passes to the caller.
 */
void sched_kw_gruptree_alloc_child_parent_list(const sched_kw_gruptree_type * kw, char *** __children, char *** __parents, int * num_pairs) {
  const int pairs  = hash_get_size(kw->gruptree_hash);
  char ** children = hash_alloc_keylist(kw->gruptree_hash);
  char ** parents  = util_malloc(pairs * sizeof * parents);

  for (int i = 0; i < pairs; i++)
    parents[i] = util_alloc_string_copy(hash_get_string(kw->gruptree_hash, children[i]));

  *num_pairs  = pairs;
  *__children = children;
  *__parents  = parents;
}
/**
 * Remove every entry from @hash. The write lock is held for the whole
 * operation; deletion goes through the unlocked internal helper.
 */
void hash_clear(hash_type *hash) {
  __hash_wrlock( hash );
  {
    const int size = hash_get_size(hash);
    if (size > 0) {
      char ** keys = hash_alloc_keylist__( hash , false );
      for (int i = 0; i < size; i++) {
        hash_del_unlocked__(hash , keys[i]);
        free(keys[i]);
      }
      free(keys);
    }
  }
  __hash_unlock( hash );
}
/**
 * Print the GRUPTREE keyword to @stream: one " 'child' 'parent' /" line
 * per hash entry, terminated by the keyword-closing "/".
 *
 * Fix: removed the stray ';' after the function's closing brace - an extra
 * top-level semicolon is not valid ISO C (pedantic error on gcc/clang).
 */
void sched_kw_gruptree_fprintf(const sched_kw_gruptree_type * kw, FILE * stream) {
  fprintf(stream, "GRUPTREE\n");
  {
    const int num_keys = hash_get_size(kw->gruptree_hash);
    char ** child_list = hash_alloc_keylist(kw->gruptree_hash);
    int i;

    for (i = 0; i < num_keys; i++) {
      const char * parent_name = hash_get_string(kw->gruptree_hash , child_list[i]);
      fprintf(stream," '%s' '%s' /\n",child_list[i] , parent_name);
    }
    util_free_stringlist( child_list , num_keys );
  }
  fprintf(stream,"/\n\n");
}
/**
 * Read "key value" pairs from @result_file into @result, placing each value
 * at the index given by custom_kw_config_index_of_key(). Returns true iff
 * every line parsed cleanly and the set of keys read matches
 * config->custom_keys exactly; returns false if the file can not be opened.
 *
 * Fixes: bounded the fscanf conversions ("%127s") - the original unbounded
 * "%s" could overflow the 128-byte stack buffers on long tokens; and the
 * read_keys hash is now freed (it leaked on every call).
 */
static bool custom_kw_config_read_data__(const custom_kw_config_type * config, const char * result_file, stringlist_type * result) {
  FILE * stream = util_fopen__(result_file, "r");
  if (stream != NULL) {
    bool read_ok = true;

    stringlist_clear(result);
    /* Pre-size the stringlist so stringlist_iset_copy() below is in range.
       NOTE(review): with an empty key set this would index -1 - assumed
       nonempty; confirm against callers. */
    stringlist_iset_ref(result, hash_get_size(config->custom_keys) - 1, NULL);

    hash_type * read_keys = hash_alloc();

    char key[128];
    char value[128];
    int read_count;
    /* Field widths keep each token within 127 chars + NUL. */
    while ((read_count = fscanf(stream, "%127s %127s", key, value)) != EOF) {
      if (read_count == 1) {
        fprintf(stderr ,"[%s] Warning: Key: '%s:%s' missing value in file: %s!\n", __func__, config->name, key, result_file);
        read_ok = false;
        break;
      }

      if (custom_kw_config_has_key(config, key)) {
        if (hash_has_key(read_keys, key)) {
          fprintf(stderr ,"[%s] Warning: Key: '%s:%s' has appeared multiple times. Only the last occurrence will be used!\n", __func__, config->name, key);
        }
        hash_insert_int(read_keys, key, 1);

        int index = custom_kw_config_index_of_key(config, key);
        stringlist_iset_copy(result, index, value);
      } else {
        fprintf(stderr ,"[%s] Warning: Key: '%s:%s' not in the available set. Ignored!\n", __func__, config->name, key);
      }
    }
    fclose(stream);

    if (read_ok)
      read_ok = hash_key_list_compare(read_keys, config->custom_keys);

    hash_free(read_keys);   /* was leaked in the original */
    return read_ok;
  }
  return false;
}
/** Number of installed jobs in the joblist. */
int ext_joblist_get_size( const ext_joblist_type * joblist ) {
  const int num_jobs = hash_get_size( joblist->jobs );
  return num_jobs;
}
/** Number of keys registered in the matcher's key set. */
int summary_key_matcher_get_size(const summary_key_matcher_type * matcher) {
  const int num_keys = hash_get_size( matcher->key_set );
  return num_keys;
}
/** Number of schema items registered with the parser. */
int config_get_schema_size( const config_parser_type * config ) {
  const int num_items = hash_get_size( config->schema_items );
  return num_items;
}
/** Number of nodes in the local dataset. */
int local_dataset_get_size( const local_dataset_type * dataset ) {
  const int num_nodes = hash_get_size( dataset->nodes );
  return num_nodes;
}
/** Returns the number of distinct wells in the RFT file. */
int ecl_rft_file_get_num_wells( const ecl_rft_file_type * rft_file ) {
  const int num_wells = hash_get_size( rft_file->well_index );
  return num_wells;
}
/** Total number of workflows, counting both real workflows and aliases. */
int ert_workflow_list_get_size( const ert_workflow_list_type * workflow_list) {
  const int num_workflows = hash_get_size( workflow_list->workflows );
  const int num_aliases   = hash_get_size( workflow_list->alias_map );
  return num_workflows + num_aliases;
}
/** Number of configuration nodes in the ensemble config. */
int ensemble_config_get_size(const ensemble_config_type * ensemble_config ) {
  const int num_nodes = hash_get_size( ensemble_config->config_nodes );
  return num_nodes;
}
/** Number of elements in the set (backed by the key hash). */
int set_get_size(const set_type *set) {
  const int num_elements = hash_get_size(set->key_hash);
  return num_elements;
}
/**
 * Free a sort list allocated with hash_alloc_sort_list(): releases each
 * copied key string, then the vector itself.
 */
static void hash_free_sort_list(const hash_type *hash , hash_sort_type *sort_list) {
  const int size = hash_get_size(hash);   /* hoisted: loop invariant, was re-evaluated every iteration */
  for (int i = 0; i < size; i++)
    free(sort_list[i].key);
  free(sort_list);
}
/** Number of rankings stored in the ranking table. */
int ranking_table_get_size( const ranking_table_type * ranking_table ) {
  const int num_rankings = hash_get_size( ranking_table->ranking_table );
  return num_rankings;
}
/** Number of custom keys in the configuration. */
int custom_kw_config_size(const custom_kw_config_type * config) {
  const int num_keys = hash_get_size(config->custom_keys);
  return num_keys;
}
/** Number of datasets attached to the ministep. */
int local_ministep_get_num_dataset( const local_ministep_type * ministep ) {
  const int num_datasets = hash_get_size( ministep->datasets );
  return num_datasets;
}