/* Advance an FSG arc iterator to the next outgoing arc.
 *
 * The iterator walks two collections in sequence: first the hash table
 * of word (non-null) transitions (itor->itor, with itor->gn walking the
 * glist stored in each hash entry), then the hash table of null
 * transitions (itor->null_itor).  Returns the advanced iterator, or
 * NULL once both collections are exhausted — in which case the
 * iterator has already been freed and must not be used again. */
fsg_arciter_t *
fsg_arciter_next(fsg_arciter_t * itor)
{
    /* Iterate over non-null arcs first. */
    if (itor->gn) {
        itor->gn = gnode_next(itor->gn);
        /* Move to the next destination arc. */
        if (itor->gn == NULL) {
            itor->itor = hash_table_iter_next(itor->itor);
            if (itor->itor != NULL)
                itor->gn = hash_entry_val(itor->itor->ent);
            else if (itor->null_itor == NULL)
                goto stop_iteration;
            /* else: return with gn == NULL so the next call takes the
             * null-transition branch below. */
        }
    }
    else {
        if (itor->null_itor == NULL)
            goto stop_iteration;
        itor->null_itor = hash_table_iter_next(itor->null_itor);
        if (itor->null_itor == NULL)
            goto stop_iteration;
    }
    return itor;
  stop_iteration:
    fsg_arciter_free(itor);
    return NULL;
}
/* Free a JSGF grammar and, for a top-level grammar, everything it owns.
 *
 * Imported sub-grammars share state with their parent, so the rule
 * table, import table, search path and link list are released only
 * when jsgf->parent is NULL; the per-grammar name/version/charset/
 * locale strings and the structure itself are always freed. */
void
jsgf_grammar_free(jsgf_t *jsgf)
{
    /* FIXME: Probably should just use refcounting instead. */
    if (jsgf->parent == NULL) {
        hash_iter_t *itor;
        gnode_t *gn;

        /* Both the key strings and the rule objects are owned here. */
        for (itor = hash_table_iter(jsgf->rules); itor;
             itor = hash_table_iter_next(itor)) {
            ckd_free((char *) itor->ent->key);
            jsgf_rule_free((jsgf_rule_t *) itor->ent->val);
        }
        hash_table_free(jsgf->rules);

        /* Imported grammars are freed recursively. */
        for (itor = hash_table_iter(jsgf->imports); itor;
             itor = hash_table_iter_next(itor)) {
            ckd_free((char *) itor->ent->key);
            jsgf_grammar_free((jsgf_t *) itor->ent->val);
        }
        hash_table_free(jsgf->imports);

        for (gn = jsgf->searchpath; gn; gn = gnode_next(gn))
            ckd_free(gnode_ptr(gn));
        glist_free(jsgf->searchpath);

        for (gn = jsgf->links; gn; gn = gnode_next(gn))
            ckd_free(gnode_ptr(gn));
        glist_free(jsgf->links);
    }
    ckd_free(jsgf->name);
    ckd_free(jsgf->version);
    ckd_free(jsgf->charset);
    ckd_free(jsgf->locale);
    ckd_free(jsgf);
}
/* Report all URLs recorded as broken during the crawl.
 *
 * Prints a localized, pluralized summary line with the count, then one
 * URL per line.  Prints only a notice when no broken links were
 * recorded (nonexisting_urls_set was never created). */
void
print_broken_links (void)
{
  hash_table_iterator iter;
  int num_elems;

  if (!nonexisting_urls_set)
    {
      logprintf (LOG_NOTQUIET, _("Found no broken links.\n\n"));
      return;
    }

  num_elems = hash_table_count (nonexisting_urls_set);
  assert (num_elems > 0);

  logprintf (LOG_NOTQUIET,
             ngettext("Found %d broken link.\n\n",
                      "Found %d broken links.\n\n", num_elems),
             num_elems);

  /* The set's keys are the URL strings themselves. */
  for (hash_table_iterate (nonexisting_urls_set, &iter);
       hash_table_iter_next (&iter); )
    {
      /* Struct url_list *list; */
      const char *url = (const char *) iter.key;

      logprintf (LOG_NOTQUIET, _("%s\n"), url);
    }
  logputs (LOG_NOTQUIET, "\n");
}
/* Unit test for hash_table insertion and iteration.
 *
 * Verifies that hash_table_enter() returns the previously stored value
 * when the key already exists (i.e. it does not overwrite), and that
 * the iterator visits every entry with its original value. */
int
main(int argc, char *argv[])
{
    hash_table_t *h;
    hash_iter_t *itor;

    /* Test insertion */
    TEST_ASSERT(h = hash_table_new(42, FALSE));
    TEST_EQUAL((void*)0xdeadbeef, hash_table_enter(h, "foo", (void*)0xdeadbeef));
    /* Duplicate key: the original value must come back, not the new one. */
    TEST_EQUAL((void*)0xdeadbeef, hash_table_enter(h, "foo", (void*)0xd0d0feed));
    TEST_EQUAL((void*)0xcafec0de, hash_table_enter(h, "bar", (void*)0xcafec0de));
    TEST_EQUAL((void*)0xeeefeeef, hash_table_enter(h, "baz", (void*)0xeeefeeef));
    TEST_EQUAL((void*)0xbabababa, hash_table_enter(h, "quux", (void*)0xbabababa));

    /* Now test iterators. */
    for (itor = hash_table_iter(h); itor; itor = hash_table_iter_next(itor)) {
        printf("%s %p\n", itor->ent->key, itor->ent->val);
        if (0 == strcmp(itor->ent->key, "foo")) {
            TEST_EQUAL(itor->ent->val, (void*)0xdeadbeef);
        }
        else if (0 == strcmp(itor->ent->key, "bar")) {
            TEST_EQUAL(itor->ent->val, (void*)0xcafec0de);
        }
        else if (0 == strcmp(itor->ent->key, "baz")) {
            TEST_EQUAL(itor->ent->val, (void*)0xeeefeeef);
        }
        else if (0 == strcmp(itor->ent->key, "quux")) {
            TEST_EQUAL(itor->ent->val, (void*)0xbabababa);
        }
    }
    /* NOTE(review): h is never freed.  Acceptable for a short-lived
     * test binary, but hash_table_free(h) here would make it
     * leak-checker clean. */

    return 0;
}
void test_hash_iterator_key_pair() { HashTable *hash_table; HashTableIterator iterator; HashTablePair pair; int *key = 0; int *val = 0; hash_table = hash_table_new(int_hash, int_equal); /* Add some values */ hash_table_insert(hash_table, &value1, &value1); hash_table_insert(hash_table, &value2, &value2); hash_table_iterate(hash_table, &iterator); while (hash_table_iter_has_more(&iterator)) { /* Retrieve both Key and Value */ pair = hash_table_iter_next(&iterator); key = (int*) pair.key; val = (int*) pair.value; assert(*key == *val); } hash_table_free(hash_table); }
/* Add an alternate word form for an existing FSG word.
 *
 * Looks up baseword in the FSG vocabulary, registers altword as a new
 * word (marked in the altwords bitvector), then duplicates every
 * transition labelled with baseword as a parallel transition labelled
 * with altword, reusing the same states and score.
 *
 * Returns the number of transitions added, or -1 if baseword is not in
 * the vocabulary. */
int
fsg_model_add_alt(fsg_model_t * fsg, char const *baseword,
                  char const *altword)
{
    int i, basewid, altwid;
    int ntrans;

    /* FIXME: This will get slow, eventually... */
    for (basewid = 0; basewid < fsg->n_word; ++basewid)
        if (0 == strcmp(fsg->vocab[basewid], baseword))
            break;
    if (basewid == fsg->n_word) {
        E_ERROR("Base word %s not present in FSG vocabulary!\n", baseword);
        return -1;
    }
    altwid = fsg_model_word_add(fsg, altword);
    /* NOTE(review): altwords is sized from n_word_alloc at first use and
     * is not visibly resized here if fsg_model_word_add() grows the
     * vocabulary past that allocation — confirm growth is handled. */
    if (fsg->altwords == NULL)
        fsg->altwords = bitvec_alloc(fsg->n_word_alloc);
    bitvec_set(fsg->altwords, altwid);

    E_DEBUG(2,("Adding alternate word transitions (%s,%s) to FSG\n",
               baseword, altword));

    /* Look for all transitions involving baseword and duplicate them. */
    /* FIXME: This will also get slow, eventually... */
    ntrans = 0;
    for (i = 0; i < fsg->n_state; ++i) {
        hash_iter_t *itor;

        if (fsg->trans[i].trans == NULL)
            continue;
        for (itor = hash_table_iter(fsg->trans[i].trans); itor;
             itor = hash_table_iter_next(itor)) {
            glist_t trans;
            gnode_t *gn;

            trans = hash_entry_val(itor->ent);
            for (gn = trans; gn; gn = gnode_next(gn)) {
                fsg_link_t *fl = gnode_ptr(gn);
                if (fl->wid == basewid) {
                    fsg_link_t *link;

                    /* Create transition object */
                    link = listelem_malloc(fsg->link_alloc);
                    link->from_state = fl->from_state;
                    link->to_state = fl->to_state;
                    link->logs2prob = fl->logs2prob; /* FIXME!!!??? */
                    link->wid = altwid;
                    /* glist_add_ptr returns a new list head; stored
                     * back into the hash entry below. */
                    trans = glist_add_ptr(trans, (void *) link);
                    ++ntrans;
                }
            }
            hash_entry_val(itor->ent) = trans;
        }
    }

    E_DEBUG(2,("Added %d alternate word transitions\n", ntrans));
    return ntrans;
}
/* Exercise removal of entries while an iterator is live.
 *
 * Assumes generate_hash_table() fills the table with NUM_TEST_VALUES
 * entries whose string values equal their keys ("0", "1", ...) — TODO
 * confirm against generate_hash_table.  Every entry whose numeric
 * value is a multiple of 100 is removed mid-iteration; the iterator
 * must nevertheless visit all NUM_TEST_VALUES entries. */
void test_hash_table_iterating_remove(void)
{
	HashTable *hash_table;
	HashTableIterator iterator;
	char buf[10];
	char *val;
	HashTablePair pair;
	int count;
	unsigned int removed;
	int i;

	hash_table = generate_hash_table();

	/* Iterate over all values in the table */
	count = 0;
	removed = 0;
	hash_table_iterate(hash_table, &iterator);

	while (hash_table_iter_has_more(&iterator)) {
		/* Read the next value */
		pair = hash_table_iter_next(&iterator);
		val = pair.value;

		/* Remove every hundredth entry; the value string doubles
		 * as the lookup key. */
		if ((atoi(val) % 100) == 0) {
			hash_table_remove(hash_table, val);
			++removed;
		}

		++count;
	}

	/* Check counts */
	assert(removed == 100);
	assert(count == NUM_TEST_VALUES);
	assert(hash_table_num_entries(hash_table) == NUM_TEST_VALUES - removed);

	/* Check all entries divisible by 100 were really removed */
	for (i=0; i<NUM_TEST_VALUES; ++i) {
		sprintf(buf, "%i", i);
		if (i % 100 == 0) {
			assert(hash_table_lookup(hash_table, buf) == NULL);
		} else {
			assert(hash_table_lookup(hash_table, buf) != NULL);
		}
	}

	hash_table_free(hash_table);
}
/* Replace the decoder's pronunciation dictionary (and optionally the
 * filler dictionary) at runtime.
 *
 * The new dictionary is loaded under a scratch configuration first, so
 * decoder state is untouched if loading fails.  On success the live
 * config is updated, the old dict/dict2pid are swapped out, and every
 * registered search is re-initialized against the new models.
 *
 * The format argument is accepted but not used in this body.
 *
 * Returns 0 on success, -1 on failure. */
int
ps_load_dict(ps_decoder_t *ps, char const *dictfile,
             char const *fdictfile, char const *format)
{
    cmd_ln_t *newconfig;
    dict2pid_t *d2p;
    dict_t *dict;
    hash_iter_t *search_it;

    /* Create a new scratch config to load this dict (so existing one
     * won't be affected if it fails) */
    newconfig = cmd_ln_init(NULL, ps_args(), TRUE, NULL);
    cmd_ln_set_boolean_r(newconfig, "-dictcase",
                         cmd_ln_boolean_r(ps->config, "-dictcase"));
    cmd_ln_set_str_r(newconfig, "-dict", dictfile);
    if (fdictfile)
        cmd_ln_set_str_r(newconfig, "-fdict", fdictfile);
    else
        cmd_ln_set_str_r(newconfig, "-fdict",
                         cmd_ln_str_r(ps->config, "-fdict"));

    /* Try to load it. */
    if ((dict = dict_init(newconfig, ps->acmod->mdef,
                          ps->acmod->lmath)) == NULL) {
        cmd_ln_free_r(newconfig);
        return -1;
    }

    /* Reinit the dict2pid. */
    if ((d2p = dict2pid_build(ps->acmod->mdef, dict)) == NULL) {
        cmd_ln_free_r(newconfig);
        return -1;
    }

    /* Success!  Update the existing config to reflect new dicts and
     * drop everything into place. */
    cmd_ln_free_r(newconfig);
    cmd_ln_set_str_r(ps->config, "-dict", dictfile);
    if (fdictfile)
        cmd_ln_set_str_r(ps->config, "-fdict", fdictfile);
    dict_free(ps->dict);
    ps->dict = dict;
    dict2pid_free(ps->d2p);
    ps->d2p = d2p;

    /* And tell all searches to reconfigure themselves. */
    for (search_it = hash_table_iter(ps->searches); search_it;
         search_it = hash_table_iter_next(search_it)) {
        if (ps_search_reinit(hash_entry_val(search_it->ent), dict, d2p) < 0) {
            /* Early exit: release the live iterator explicitly. */
            hash_table_iter_free(search_it);
            return -1;
        }
    }

    return 0;
}
/* Dump every key/value pair in the table and verify the number of
 * entries visited matches the table's own count. */
void
print_hash (struct hash_table *sht)
{
  hash_table_iterator it;
  int seen = 0;

  hash_table_iterate (sht, &it);
  while (hash_table_iter_next (&it))
    {
      printf ("%s: %s\n", it.key, it.value);
      ++seen;
    }
  assert (seen == sht->count);
}
void test_hash_table_iterating(void) { HashTable *hash_table; HashTableIterator iterator; int count; hash_table = generate_hash_table(); /* Iterate over all values in the table */ count = 0; hash_table_iterate(hash_table, &iterator); while (hash_table_iter_has_more(&iterator)) { hash_table_iter_next(&iterator); ++count; } assert(count == NUM_TEST_VALUES); /* Test iter_next after iteration has completed. */ assert(hash_table_iter_next(&iterator) == HASH_TABLE_NULL); hash_table_free(hash_table); /* Test iterating over an empty table */ hash_table = hash_table_new(int_hash, int_equal); hash_table_iterate(hash_table, &iterator); assert(hash_table_iter_has_more(&iterator) == 0); hash_table_free(hash_table); }
/* Return the name under which the currently active search module was
 * registered, or NULL if the active search is not in the table.
 *
 * Fix: the original broke out of the loop as soon as the search was
 * found, leaking the hash iterator — the iterator is only reclaimed
 * when hash_table_iter_next() runs to exhaustion (which is why early
 * exits elsewhere in this codebase call hash_table_iter_free()).  We
 * now remember the first match and let the loop exhaust the iterator,
 * which releases it for us without needing any additional calls. */
const char*
ps_get_search(ps_decoder_t *ps)
{
    hash_iter_t *search_it;
    const char* name = NULL;

    for (search_it = hash_table_iter(ps->searches); search_it;
         search_it = hash_table_iter_next(search_it)) {
        if (name == NULL && hash_entry_val(search_it->ent) == ps->search)
            name = hash_entry_key(search_it->ent);
    }

    return name;
}
/* Dispose of the downloaded-files registry: release each stored key
 * string, then the table itself. */
static void
downloaded_files_free (void)
{
  hash_table_iterator iter;

  if (!downloaded_files_hash)
    return;

  for (hash_table_iterate (downloaded_files_hash, &iter);
       hash_table_iter_next (&iter); )
    xfree (iter.key);

  hash_table_destroy (downloaded_files_hash);
  downloaded_files_hash = NULL;
}
/* Dispose of every registered search module, then the table that held
 * them, and clear the decoder's search pointers. */
static void
ps_free_searches(ps_decoder_t *ps)
{
    if (ps->searches != NULL) {
        hash_iter_t *it = hash_table_iter(ps->searches);

        while (it != NULL) {
            ps_search_free(hash_entry_val(it->ent));
            it = hash_table_iter_next(it);
        }
        hash_table_free(ps->searches);
    }

    ps->searches = NULL;
    ps->search = NULL;
}
/* Run the supplied handler over every value in the table.  Logs and
 * bails out if no handler was given. */
void __la_dump_table(HashTable *table, __value_handler_t func)
{
    HashTableIterator it;

    LOGV("%p (hash table: dump table)", table);

    if (!func) {
        LOGW("%p (dump hash table: given no handler)", table);
        return;
    }

    /* Hand each value to the caller's handler in iteration order. */
    hash_table_iterate(table, &it);
    while (hash_table_iter_has_more(&it))
        (*func)(hash_table_iter_next(&it));
}
/* Tear down the registered-specs table: free each key and its parsed
 * specs value, then destroy the table and reset the global. */
void
res_cleanup (void)
{
  hash_table_iterator iter;

  if (!registered_specs)
    return;

  for (hash_table_iterate (registered_specs, &iter);
       hash_table_iter_next (&iter); )
    {
      xfree (iter.key);
      free_specs (iter.value);
    }

  hash_table_destroy (registered_specs);
  registered_specs = NULL;
}
/* Free the transition tables hanging off state i of the FSG.
 *
 * Only the glist spines of the word-transition lists are freed here;
 * the fsg_link_t objects themselves come from fsg->link_alloc and are
 * reclaimed wholesale when that allocator is destroyed (see FIXME). */
static void
trans_list_free(fsg_model_t * fsg, int32 i)
{
    hash_iter_t *itor;

    /* FIXME (maybe): FSG links will all get freed when we call
     * listelem_alloc_free() so don't bother freeing them explicitly
     * here. */
    if (fsg->trans[i].trans) {
        for (itor = hash_table_iter(fsg->trans[i].trans);
             itor; itor = hash_table_iter_next(itor)) {
            glist_t gl = (glist_t) hash_entry_val(itor->ent);
            glist_free(gl);
        }
    }
    /* NOTE(review): called unconditionally — assumes hash_table_free()
     * tolerates a NULL table; confirm. */
    hash_table_free(fsg->trans[i].trans);
    hash_table_free(fsg->trans[i].null_trans);
}
/* Free all registered search modules, their key strings, and the table
 * that held them, then clear the decoder's search pointers. */
static void
ps_free_searches(ps_decoder_t *ps)
{
    if (ps->searches) {
        /* Release keys manually as we used ckd_salloc to add them,
         * release every search too. */
        hash_iter_t *search_it;

        for (search_it = hash_table_iter(ps->searches); search_it;
             search_it = hash_table_iter_next(search_it)) {
            ckd_free((char *) hash_entry_key(search_it->ent));
            ps_search_free(hash_entry_val(search_it->ent));
        }
        /* Emptied before freeing — presumably so hash_table_free()
         * doesn't touch the already-freed key strings; confirm. */
        hash_table_empty(ps->searches);
        hash_table_free(ps->searches);
    }
    ps->searches = NULL;
    ps->search = NULL;
}
/* Release the hostname-to-address cache.  Each cached address_list is
 * required to hold exactly one remaining reference (ours) at cleanup
 * time. */
void
host_cleanup (void)
{
  hash_table_iterator iter;

  if (!host_name_addresses_map)
    return;

  for (hash_table_iterate (host_name_addresses_map, &iter);
       hash_table_iter_next (&iter); )
    {
      char *name = iter.key;
      struct address_list *list = iter.value;

      xfree (name);
      assert (list->refcount == 1);
      address_list_delete (list);
    }

  hash_table_destroy (host_name_addresses_map);
  host_name_addresses_map = NULL;
}
/* Release a batch decoder and everything it owns.
 *
 * Safe to call with NULL.  Closes the control/alignment/hypothesis
 * files if open, frees the config, both search passes and the search
 * factory, closes every per-utterance hypothesis FILE handle stored in
 * hypfiles, and finally frees the structure.  Always returns 0. */
int
batch_decoder_free(batch_decoder_t *bd)
{
    hash_iter_t *itor;

    if (bd == NULL)
        return 0;
    if (bd->ctlfh != NULL)
        fclose(bd->ctlfh);
    if (bd->alignfh != NULL)
        fclose(bd->alignfh);
    if (bd->hypfh != NULL)
        fclose(bd->hypfh);
    cmd_ln_free_r(bd->config);
    search_free(bd->fwdtree);
    search_free(bd->fwdflat);
    //search_free(bd->latgen);
    search_factory_free(bd->sf);
    /* Each hypfiles value is an open FILE handle. */
    for (itor = hash_table_iter(bd->hypfiles); itor;
         itor = hash_table_iter_next(itor)) {
        fclose(hash_entry_val(itor->ent));
    }
    hash_table_free(bd->hypfiles);
    ckd_free(bd);

    return 0;
}
/* Advance a search-module iterator; returns NULL at the end of
 * iteration.  The public iterator type is treated as a cast-compatible
 * view of the underlying hash-table iterator. */
ps_search_iter_t *
ps_search_iter_next(ps_search_iter_t *itor)
{
    hash_iter_t *raw = (hash_iter_t *) itor;

    return (ps_search_iter_t *) hash_table_iter_next(raw);
}
/* Read an FSG from an already-open file.
 *
 * Parses the textual FSG format: an FSG_BEGIN header (with optional
 * name), a state-count line, start- and final-state declarations, a
 * list of transitions, and an FSG_END trailer.  Word labels are
 * interned into a temporary vocabulary hash table and then moved into
 * fsg->vocab; the transitive closure of all null transitions is
 * computed before returning.
 *
 * fp    - open file positioned at (or before) the FSG_BEGIN line.
 * lmath - log-math space used to convert probabilities to log scores.
 * lw    - language weight applied to transition log-probabilities.
 *
 * Returns the new FSG, or NULL on any parse error (everything
 * allocated so far is cleaned up). */
fsg_model_t *
fsg_model_read(FILE * fp, logmath_t * lmath, float32 lw)
{
    fsg_model_t *fsg;
    hash_table_t *vocab;
    hash_iter_t *itor;
    int32 lastwid;
    char **wordptr;
    char *lineptr;
    char *fsgname;
    int32 lineno;
    int32 n, i, j;
    int n_state, n_trans, n_null_trans;
    glist_t nulls;
    float32 p;

    lineno = 0;
    vocab = hash_table_new(32, FALSE);
    wordptr = NULL;
    lineptr = NULL;
    nulls = NULL;
    fsgname = NULL;
    fsg = NULL;

    /* Scan upto FSG_BEGIN header */
    for (;;) {
        n = nextline_str2words(fp, &lineno, &lineptr, &wordptr);
        if (n < 0) {
            E_ERROR("%s declaration missing\n", FSG_MODEL_BEGIN_DECL);
            goto parse_error;
        }

        if ((strcmp(wordptr[0], FSG_MODEL_BEGIN_DECL) == 0)) {
            if (n > 2) {
                E_ERROR("Line[%d]: malformed FSG_BEGIN declaration\n",
                        lineno);
                goto parse_error;
            }
            break;
        }
    }

    /* Save FSG name, or it will get clobbered below :(.
     * If name is missing, try the default. */
    if (n == 2) {
        fsgname = ckd_salloc(wordptr[1]);
    }
    else {
        E_WARN("FSG name is missing\n");
        fsgname = ckd_salloc("unknown");
    }

    /* Read #states */
    n = nextline_str2words(fp, &lineno, &lineptr, &wordptr);
    if ((n != 2)
        || ((strcmp(wordptr[0], FSG_MODEL_N_DECL) != 0)
            && (strcmp(wordptr[0], FSG_MODEL_NUM_STATES_DECL) != 0))
        || (sscanf(wordptr[1], "%d", &n_state) != 1)
        || (n_state <= 0)) {
        E_ERROR
            ("Line[%d]: #states declaration line missing or malformed\n",
             lineno);
        goto parse_error;
    }

    /* Now create the FSG. */
    fsg = fsg_model_init(fsgname, lmath, lw, n_state);
    ckd_free(fsgname);
    fsgname = NULL;

    /* Read start state */
    n = nextline_str2words(fp, &lineno, &lineptr, &wordptr);
    if ((n != 2)
        || ((strcmp(wordptr[0], FSG_MODEL_S_DECL) != 0)
            && (strcmp(wordptr[0], FSG_MODEL_START_STATE_DECL) != 0))
        || (sscanf(wordptr[1], "%d", &(fsg->start_state)) != 1)
        || (fsg->start_state < 0)
        || (fsg->start_state >= fsg->n_state)) {
        E_ERROR
            ("Line[%d]: start state declaration line missing or malformed\n",
             lineno);
        goto parse_error;
    }

    /* Read final state */
    n = nextline_str2words(fp, &lineno, &lineptr, &wordptr);
    if ((n != 2)
        || ((strcmp(wordptr[0], FSG_MODEL_F_DECL) != 0)
            && (strcmp(wordptr[0], FSG_MODEL_FINAL_STATE_DECL) != 0))
        || (sscanf(wordptr[1], "%d", &(fsg->final_state)) != 1)
        || (fsg->final_state < 0)
        || (fsg->final_state >= fsg->n_state)) {
        E_ERROR
            ("Line[%d]: final state declaration line missing or malformed\n",
             lineno);
        goto parse_error;
    }

    /* Read transitions */
    lastwid = 0;
    n_trans = n_null_trans = 0;
    for (;;) {
        int32 wid, tprob;

        n = nextline_str2words(fp, &lineno, &lineptr, &wordptr);
        if (n <= 0) {
            E_ERROR("Line[%d]: transition or FSG_END statement expected\n",
                    lineno);
            goto parse_error;
        }

        if ((strcmp(wordptr[0], FSG_MODEL_END_DECL) == 0)) {
            break;
        }

        if ((strcmp(wordptr[0], FSG_MODEL_T_DECL) == 0)
            || (strcmp(wordptr[0], FSG_MODEL_TRANSITION_DECL) == 0)) {
            /* Transition line: from-state, to-state, probability and an
             * optional word label. */
            if (((n != 4) && (n != 5))
                || (sscanf(wordptr[1], "%d", &i) != 1)
                || (sscanf(wordptr[2], "%d", &j) != 1)
                || (i < 0) || (i >= fsg->n_state)
                || (j < 0) || (j >= fsg->n_state)) {
                E_ERROR
                    ("Line[%d]: transition spec malformed; Expecting: from-state to-state trans-prob [word]\n",
                     lineno);
                goto parse_error;
            }
            p = atof_c(wordptr[3]);
            if ((p <= 0.0) || (p > 1.0)) {
                E_ERROR
                    ("Line[%d]: transition spec malformed; Expecting float as transition probability\n",
                     lineno);
                goto parse_error;
            }
        }
        else {
            E_ERROR("Line[%d]: transition or FSG_END statement expected\n",
                    lineno);
            goto parse_error;
        }

        /* Convert the probability to a language-weighted log score. */
        tprob = (int32) (logmath_log(lmath, p) * fsg->lw);

        /* Add word to "dictionary". */
        if (n > 4) {
            /* Word transition: intern the label, assigning word ids in
             * order of first appearance. */
            if (hash_table_lookup_int32(vocab, wordptr[4], &wid) < 0) {
                (void) hash_table_enter_int32(vocab,
                                              ckd_salloc(wordptr[4]),
                                              lastwid);
                wid = lastwid;
                ++lastwid;
            }
            fsg_model_trans_add(fsg, i, j, tprob, wid);
            ++n_trans;
        }
        else {
            /* Null (epsilon) transition: remember new ones for the
             * closure computation below. */
            if (fsg_model_null_trans_add(fsg, i, j, tprob) == 1) {
                ++n_null_trans;
                nulls =
                    glist_add_ptr(nulls, fsg_model_null_trans(fsg, i, j));
            }
        }
    }

    E_INFO("FSG: %d states, %d unique words, %d transitions (%d null)\n",
           fsg->n_state, hash_table_inuse(vocab), n_trans, n_null_trans);

    /* Now create a string table from the "dictionary"; key strings are
     * moved, not copied, into fsg->vocab. */
    fsg->n_word = hash_table_inuse(vocab);
    fsg->n_word_alloc = fsg->n_word + 10; /* Pad it a bit. */
    fsg->vocab = ckd_calloc(fsg->n_word_alloc, sizeof(*fsg->vocab));
    for (itor = hash_table_iter(vocab); itor;
         itor = hash_table_iter_next(itor)) {
        char const *word = hash_entry_key(itor->ent);
        int32 wid = (int32) (long) hash_entry_val(itor->ent);
        fsg->vocab[wid] = (char *) word;
    }
    hash_table_free(vocab);

    /* Do transitive closure on null transitions */
    nulls = fsg_model_null_trans_closure(fsg, nulls);
    glist_free(nulls);

    ckd_free(lineptr);
    ckd_free(wordptr);

    return fsg;

  parse_error:
    /* On failure the vocab still owns its key strings. */
    for (itor = hash_table_iter(vocab); itor;
         itor = hash_table_iter_next(itor))
        ckd_free((char *) hash_entry_key(itor->ent));
    glist_free(nulls);
    hash_table_free(vocab);
    ckd_free(fsgname);
    ckd_free(lineptr);
    ckd_free(wordptr);
    fsg_model_free(fsg);
    return NULL;
}
/* Compute the transitive closure of the FSG's null transitions.
 *
 * nulls is a list of known null (epsilon) links; if NULL, it is seeded
 * by probing every (i, j) state pair with fsg_model_null_trans().
 * Pairs of null links a->b, b->c are repeatedly composed into a->c
 * with summed log-probabilities until a pass makes no change.
 * Returns the (possibly extended) list; caller owns it. */
glist_t
fsg_model_null_trans_closure(fsg_model_t * fsg, glist_t nulls)
{
    gnode_t *gn1;
    int updated;
    fsg_link_t *tl1, *tl2;
    int32 k, n;

    E_INFO("Computing transitive closure for null transitions\n");

    /* No initial list given: collect every existing null transition. */
    if (nulls == NULL) {
        fsg_link_t *null;
        int i, j;

        for (i = 0; i < fsg->n_state; ++i) {
            for (j = 0; j < fsg->n_state; ++j) {
                if ((null = fsg_model_null_trans(fsg, i, j)))
                    nulls = glist_add_ptr(nulls, null);
            }
        }
    }

    /*
     * Probably not the most efficient closure implementation, in general, but
     * probably reasonably efficient for a sparse null transition matrix.
     */
    n = 0;
    do {
        updated = FALSE;

        for (gn1 = nulls; gn1; gn1 = gnode_next(gn1)) {
            hash_iter_t *itor;

            tl1 = (fsg_link_t *) gnode_ptr(gn1);
            /* Null transitions carry no word label. */
            assert(tl1->wid < 0);

            if (fsg->trans[tl1->to_state].null_trans == NULL)
                continue;
            for (itor = hash_table_iter(fsg->trans[tl1->to_state].null_trans);
                 itor; itor = hash_table_iter_next(itor)) {
                tl2 = (fsg_link_t *) hash_entry_val(itor->ent);

                /* Compose tl1 and tl2.  k > 0 appears to mean a brand
                 * new link was created, k == 0 an existing one changed
                 * (see fsg_model_null_trans_add); k < 0 means no
                 * change. */
                k = fsg_model_null_trans_add(fsg,
                                             tl1->from_state,
                                             tl2->to_state,
                                             tl1->logs2prob +
                                             tl2->logs2prob);
                if (k >= 0) {
                    updated = TRUE;
                    if (k > 0) {
                        nulls = glist_add_ptr(nulls, (void *)
                                              fsg_model_null_trans
                                              (fsg, tl1->from_state,
                                               tl2->to_state));
                        n++;
                    }
                }
            }
        }
    } while (updated);

    E_INFO("%d null transitions added\n", n);

    return nulls;
}
/* Convert every audio file listed in a control file.
 *
 * Honors -nskip/-runlen, or -part/-npart (translated into an
 * equivalent skip/run window over the line count).  Each control line
 * is "fileid [rest...]"; a file ID that appears more than once is
 * converted only once.  Returns 0 on success, the first conversion
 * failure code, or -1 on I/O error.
 *
 * Fix: when a duplicate entry was skipped, the infile/outfile strings
 * just built by build_filenames() leaked (ownership only passes to the
 * `files` table for converted entries, which are ckd_free'd in the
 * cleanup loop).  They are now released before continuing. */
static int
run_control_file(sphinx_wave2feat_t *wtf, char const *ctlfile)
{
    hash_table_t *files;
    hash_iter_t *itor;
    lineiter_t *li;
    FILE *ctlfh;
    int nskip, runlen, npart, rv = 0;

    if ((ctlfh = fopen(ctlfile, "r")) == NULL) {
        E_ERROR_SYSTEM("Failed to open control file %s", ctlfile);
        return -1;
    }
    nskip = cmd_ln_int32_r(wtf->config, "-nskip");
    runlen = cmd_ln_int32_r(wtf->config, "-runlen");
    if ((npart = cmd_ln_int32_r(wtf->config, "-npart"))) {
        /* Count lines in the file to size each part. */
        int partlen, part, nlines = 0;
        part = cmd_ln_int32_r(wtf->config, "-part");
        for (li = lineiter_start(ctlfh); li; li = lineiter_next(li))
            ++nlines;
        fseek(ctlfh, 0, SEEK_SET);
        partlen = nlines / npart;
        nskip = partlen * (part - 1);
        if (part == npart)
            runlen = -1;    /* Last part absorbs any remainder. */
        else
            runlen = partlen;
    }

    if (runlen != -1) {
        E_INFO("Processing %d utterances at position %d\n", runlen, nskip);
        files = hash_table_new(runlen, HASH_CASE_YES);
    }
    else {
        E_INFO("Processing all remaining utterances at position %d\n", nskip);
        files = hash_table_new(1000, HASH_CASE_YES);
    }

    for (li = lineiter_start(ctlfh); li; li = lineiter_next(li)) {
        char *c, *infile, *outfile;

        if (nskip-- > 0)
            continue;
        if (runlen == 0) {
            lineiter_free(li);
            break;
        }
        --runlen;

        string_trim(li->buf, STRING_BOTH);
        /* Extract the file ID from the control line. */
        if ((c = strchr(li->buf, ' ')) != NULL)
            *c = '\0';
        if (strlen(li->buf) == 0) {
            E_WARN("Empty line %d in control file, skipping\n", li->lineno);
            continue;
        }
        build_filenames(wtf->config, li->buf, &infile, &outfile);
        if (hash_table_lookup(files, infile, NULL) == 0) {
            /* Already converted this file; free the freshly built
             * names to avoid leaking them (bug fix). */
            ckd_free(infile);
            ckd_free(outfile);
            continue;
        }
        rv = sphinx_wave2feat_convert_file(wtf, infile, outfile);
        /* The table takes ownership of infile/outfile; both are freed
         * in the cleanup loop below. */
        hash_table_enter(files, infile, outfile);
        if (rv != 0) {
            lineiter_free(li);
            break;
        }
    }
    for (itor = hash_table_iter(files); itor;
         itor = hash_table_iter_next(itor)) {
        ckd_free((void *)hash_entry_key(itor->ent));
        ckd_free(hash_entry_val(itor->ent));
    }
    hash_table_free(files);
    if (fclose(ctlfh) == EOF)
        E_ERROR_SYSTEM("Failed to close control file");
    return rv;
}
/* Resolve a JSGF import declaration.
 *
 * name is the imported rule reference including angle brackets, e.g.
 * "<com.example.grammar.rule>" or "<com.example.grammar.*>".  The
 * grammar file is located on jsgf->searchpath (dots become directory
 * separators, ".gram" is appended), parsed — or reused if already in
 * jsgf->imports — and each matching public rule is linked into this
 * grammar's rule table under its local name.
 *
 * Returns the imported rule for a single-rule import, or NULL for
 * wildcard imports and on failure.  NOTE(review): a successful
 * wildcard import also returns NULL — callers apparently must not
 * treat that as an error; confirm. */
jsgf_rule_t *
jsgf_import_rule(jsgf_t * jsgf, char *name)
{
    char *c, *path, *newpath;
    size_t namelen, packlen;
    void *val;
    jsgf_t *imp;
    int import_all;

    /* Trim the leading and trailing <> */
    namelen = strlen(name);
    /* namelen - 2 strips the brackets; + 6 leaves room for ".gram\0". */
    path = ckd_malloc(namelen - 2 + 6);
    strcpy(path, name + 1);

    /* Split off the first part of the name */
    c = strrchr(path, '.');
    if (c == NULL) {
        E_ERROR("Imported rule is not qualified: %s\n", name);
        ckd_free(path);
        return NULL;
    }
    packlen = c - path;
    *c = '\0';

    /* Look for import foo.* */
    import_all = (strlen(name) > 2
                  && 0 == strcmp(name + namelen - 3, ".*>"));

    /* Construct a filename. */
    for (c = path; *c; ++c)
        if (*c == '.')
            *c = '/';
    strcat(path, ".gram");
    newpath = path_list_search(jsgf->searchpath, path);
    if (newpath == NULL) {
        E_ERROR("Failed to find grammar %s\n", path);
        ckd_free(path);
        return NULL;
    }
    ckd_free(path);

    path = newpath;
    E_INFO("Importing %s from %s to %s\n", name, path, jsgf->name);

    /* FIXME: Also, we need to make sure that path is fully qualified
     * here, by adding any prefixes from jsgf->name to it. */

    /* See if we have parsed it already */
    if (hash_table_lookup(jsgf->imports, path, &val) == 0) {
        E_INFO("Already imported %s\n", path);
        imp = val;
        ckd_free(path);
    }
    else {
        /* If not, parse it.  The imports table takes ownership of
         * path. */
        imp = jsgf_parse_file(path, jsgf);
        val = hash_table_enter(jsgf->imports, path, imp);
        if (val != (void *) imp) {
            E_WARN("Multiply imported file: %s\n", path);
        }
    }
    if (imp != NULL) {
        hash_iter_t *itor;
        /* Look for public rules matching rulename. */
        for (itor = hash_table_iter(imp->rules); itor;
             itor = hash_table_iter_next(itor)) {
            hash_entry_t *he = itor->ent;
            jsgf_rule_t *rule = hash_entry_val(he);
            int rule_matches;
            char *rule_name = importname2rulename(name);

            if (import_all) {
                /* Match package name (symbol table is shared) */
                rule_matches = !strncmp(rule_name, rule->name, packlen + 1);
            }
            else {
                /* Exact match */
                rule_matches = !strcmp(rule_name, rule->name);
            }
            ckd_free(rule_name);

            if (rule->is_public && rule_matches) {
                void *val;
                char *newname;

                /* Link this rule into the current namespace. */
                c = strrchr(rule->name, '.');
                assert(c != NULL);
                newname = jsgf_fullname(jsgf, c);

                E_INFO("Imported %s\n", newname);
                val = hash_table_enter(jsgf->rules, newname,
                                       jsgf_rule_retain(rule));
                if (val != (void *) rule) {
                    E_WARN("Multiply defined symbol: %s\n", newname);
                }
                if (!import_all) {
                    /* Single-rule import: done.  Free the live iterator
                     * before the early return. */
                    hash_table_iter_free(itor);
                    return rule;
                }
            }
        }
    }

    return NULL;
}
int main(int arg, char *argv) { char * str1 = "HTTP/1.1 index.html"; char * str2 = "HTTP/1.0 aaaaa.html"; char * str3 = "HTTP/1.0 bbbbb.html"; char * str4 = "HTTP/1.1 ccccc.html"; char * str5 = "HTTP/0.9 ddddd.html"; char * str6 = "HTTP/1.1 fffff.html"; char * str7 = "HTTP/0.9 eeeee.html"; LST_String * nbytes1 = lst_string_new(str1, 1, strlen(str1)); LST_String * nbytes2 = lst_string_new(str2, 1, strlen(str2)); LST_String * nbytes3 = lst_string_new(str3, 1, strlen(str3)); LST_String * nbytes4 = lst_string_new(str4, 1, strlen(str4)); LST_String * nbytes5 = lst_string_new(str5, 1, strlen(str5)); LST_String * nbytes6 = lst_string_new(str6, 1, strlen(str6)); LST_String * nbytes7 = lst_string_new(str7, 1, strlen(str7)); LST_StringSet * set = lst_stringset_new(); lst_stringset_add(set, nbytes1); lst_stringset_add(set, nbytes2); lst_stringset_add(set, nbytes3); lst_stringset_add(set, nbytes4); lst_stringset_add(set, nbytes5); lst_stringset_add(set, nbytes6); lst_stringset_add(set, nbytes7); int first_bytes = 8; int last_bytes = 10; int num_bytes = 19; int gamma_merge = 2; product_distribution_t * pd = product_distribution_new(set, first_bytes, last_bytes, num_bytes); /* print pd */ HashTableIterator iterator1; hash_table_iterate(pd->offset_distribution, &iterator1); while (hash_table_iter_has_more(&iterator1)){ HashTablePair pair1 = hash_table_iter_next(&iterator1); int *key1 = (int *) pair1.key; byte_distribution_t *value1 = (byte_distribution_t *) pair1.value; HashTableIterator iterator2; hash_table_iterate(value1->value_frequency, &iterator2); if (hash_table_num_entries(value1->value_frequency) > gamma_merge) { continue; } printf("offset %d : ", *key1); while(hash_table_iter_has_more(&iterator2)){ HashTablePair pair2 = hash_table_iter_next(&iterator2); char *key2 = (char *) pair2.key; int *value2 = (int *) pair2.value; //printf("<%c, %d>\t", key2[0], *value2); if (0 == *key1) { printf("^%s\t", key2); } else if (num_bytes - 1 == *key1) { printf("%s$\t", key2); } else { 
printf("%s\t", key2); } } printf("\n"); } product_distribution_free(pd); return 0; }
/* Compute the transitive closure of the FSG's null transitions.
 *
 * nulls is a list of known null (epsilon) links; if NULL, it is seeded
 * from the per-state null-transition hash tables.  Pairs of null links
 * a->b, b->c are repeatedly composed into a->c with summed
 * log-probabilities until a pass makes no change.  Returns the
 * (possibly extended) list; caller owns it. */
glist_t
fsg_model_null_trans_closure(fsg_model_t * fsg, glist_t nulls)
{
    gnode_t *gn1;
    int updated;
    fsg_link_t *tl1, *tl2;
    int32 k, n;

    E_INFO("Computing transitive closure for null transitions\n");

    /* If our caller didn't give us a list of null-transitions, make
       such a list.  Just loop through all the FSG states, and all the
       null-transitions in that state (which are kept in their own hash
       table). */
    if (nulls == NULL) {
        int i;
        for (i = 0; i < fsg->n_state; ++i) {
            hash_iter_t *itor;
            hash_table_t *null_trans = fsg->trans[i].null_trans;
            if (null_trans == NULL)
                continue;
            for (itor = hash_table_iter(null_trans);
                 itor != NULL; itor = hash_table_iter_next(itor)) {
                nulls = glist_add_ptr(nulls, hash_entry_val(itor->ent));
            }
        }
    }

    /*
     * Probably not the most efficient closure implementation, in general, but
     * probably reasonably efficient for a sparse null transition matrix.
     */
    n = 0;
    do {
        updated = FALSE;

        for (gn1 = nulls; gn1; gn1 = gnode_next(gn1)) {
            hash_iter_t *itor;

            tl1 = (fsg_link_t *) gnode_ptr(gn1);
            /* Null transitions carry no word label. */
            assert(tl1->wid < 0);

            if (fsg->trans[tl1->to_state].null_trans == NULL)
                continue;
            for (itor = hash_table_iter(fsg->trans[tl1->to_state].null_trans);
                 itor; itor = hash_table_iter_next(itor)) {
                tl2 = (fsg_link_t *) hash_entry_val(itor->ent);

                /* Compose tl1 and tl2; k > 0 appears to mean a brand
                 * new link was created (see fsg_model_null_trans_add). */
                k = fsg_model_null_trans_add(fsg,
                                             tl1->from_state,
                                             tl2->to_state,
                                             tl1->logs2prob +
                                             tl2->logs2prob);
                if (k >= 0) {
                    updated = TRUE;
                    if (k > 0) {
                        nulls = glist_add_ptr(nulls, (void *)
                                              fsg_model_null_trans
                                              (fsg, tl1->from_state,
                                               tl2->to_state));
                        n++;
                    }
                }
            }
        }
    } while (updated);

    E_INFO("%d null transitions added\n", n);

    return nulls;
}
/* Add a word to the dictionary and, optionally, to running searches.
 *
 * phones is a space-separated CI phone string; it is parsed, validated
 * against the acoustic model, and the pronunciation is added to the
 * dictionary and dict2pid.  The word is also pushed into any N-Gram
 * search's language model, and if update is nonzero every registered
 * search is re-initialized.
 *
 * Returns the new word ID on success, or a negative value on failure.
 *
 * Fix: the search-reinit check was written as
 *     if ((rv = ps_search_reinit(...) < 0))
 * which, due to precedence, assigned the comparison result (0 or 1) to
 * rv and returned 1 on failure instead of the negative error code.
 * Parentheses corrected so rv receives the actual return value. */
int
ps_add_word(ps_decoder_t *ps,
            char const *word,
            char const *phones,
            int update)
{
    int32 wid;
    s3cipid_t *pron;
    hash_iter_t *search_it;
    char **phonestr, *tmp;
    int np, i, rv;

    /* Parse phones into an array of phone IDs. */
    tmp = ckd_salloc(phones);
    np = str2words(tmp, NULL, 0);
    phonestr = ckd_calloc(np, sizeof(*phonestr));
    str2words(tmp, phonestr, np);
    pron = ckd_calloc(np, sizeof(*pron));
    for (i = 0; i < np; ++i) {
        pron[i] = bin_mdef_ciphone_id(ps->acmod->mdef, phonestr[i]);
        if (pron[i] == -1) {
            E_ERROR("Unknown phone %s in phone string %s\n",
                    phonestr[i], tmp);
            ckd_free(phonestr);
            ckd_free(tmp);
            ckd_free(pron);
            return -1;
        }
    }
    /* No longer needed. */
    ckd_free(phonestr);
    ckd_free(tmp);

    /* Add it to the dictionary. */
    if ((wid = dict_add_word(ps->dict, word, pron, np)) == -1) {
        ckd_free(pron);
        return -1;
    }
    /* No longer needed. */
    ckd_free(pron);

    /* Now we also have to add it to dict2pid. */
    dict2pid_add_word(ps->d2p, wid);

    /* TODO: we definitely need to refactor this */
    for (search_it = hash_table_iter(ps->searches); search_it;
         search_it = hash_table_iter_next(search_it)) {
        ps_search_t *search = hash_entry_val(search_it->ent);
        if (!strcmp(PS_SEARCH_NGRAM, ps_search_name(search))) {
            ngram_model_t *lmset = ((ngram_search_t *) search)->lmset;
            if (ngram_model_add_word(lmset, word, 1.0) == NGRAM_INVALID_WID) {
                /* Early exit: free the live iterator. */
                hash_table_iter_free(search_it);
                return -1;
            }
        }

        if (update) {
            /* Precedence fix: compare the call's result, don't assign
             * the comparison. */
            if ((rv = ps_search_reinit(search, ps->dict, ps->d2p)) < 0) {
                hash_table_iter_free(search_it);
                return rv;
            }
        }
    }

    /* Rebuild the widmap and search tree if requested. */
    return wid;
}