/*
 * Free the per-LM-word lmstate lookup structures and reset the list of
 * active LM word IDs.
 * NOTE(review): only one level of children is freed here; if a child
 * vh_lms2vh_t can itself own a non-empty children glist, those
 * grandchildren would leak -- confirm the structure's invariants.
 */
static void vithist_lmstate_reset(vithist_t * vh)
{
    gnode_t *lgn, *gn;
    int32 i;
    vh_lms2vh_t *lms2vh, *child;

    /* Walk every LM word id that currently has an lmstate root entry. */
    for (lgn = vh->lwidlist; lgn; lgn = gnode_next(lgn)) {
        i = (int32) gnode_int32(lgn);
        lms2vh = vh->lms2vh_root[i];

        /* Free each child node, then the child list and the root itself. */
        for (gn = lms2vh->children; gn; gn = gnode_next(gn)) {
            child = (vh_lms2vh_t *) gnode_ptr(gn);
            ckd_free((void *) child);
        }
        glist_free(lms2vh->children);

        ckd_free((void *) lms2vh);
        vh->lms2vh_root[i] = NULL;
    }

    glist_free(vh->lwidlist);
    vh->lwidlist = NULL;
}
/*
 * Debug dump of the LM-state -> Viterbi-history mapping to fp, bracketed
 * by "LMSTATE"/"END_LMSTATE" markers.  Each entry prints
 * "<word>.<ciphone> -> <vithist id>" and then recursively dumps its
 * subtree via vithist_lmstate_subtree_dump().
 */
static void vithist_lmstate_dump (vithist_t *vh, kbcore_t *kbc, FILE *fp)
{
    glist_t gl;
    gnode_t *lgn, *gn;
    int32 i;
    vh_lmstate2vithist_t *lms2vh;
    mdef_t *mdef;
    lm_t *lm;

    mdef = kbcore_mdef (kbc);
    lm = kbcore_lm (kbc);

    fprintf (fp, "LMSTATE\n");
    for (lgn = vh->lwidlist; lgn; lgn = gnode_next(lgn)) {
        i = (int32) gnode_int32 (lgn);

        gl = vh->lmstate_root[i];
        assert (gl);    /* every listed LM wid must have a root list */

        for (gn = gl; gn; gn = gnode_next(gn)) {
            lms2vh = (vh_lmstate2vithist_t *) gnode_ptr (gn);
            fprintf (fp, "\t%s.%s -> %d\n", lm_wordstr(lm, i),
                     mdef_ciphone_str (mdef, lms2vh->state), lms2vh->vhid);
            vithist_lmstate_subtree_dump (vh, kbc, lms2vh, 1, fp);
        }
    }
    fprintf (fp, "END_LMSTATE\n");
    fflush (fp);
}
/*
 * Release a JSGF grammar.  Only a top-level grammar (parent == NULL) owns
 * the rule table, imported sub-grammars (freed recursively), search path
 * entries and link records; child grammars share the parent's tables and
 * only free their own name/version/charset/locale strings.
 */
void jsgf_grammar_free(jsgf_t *jsgf)
{
    /* FIXME: Probably should just use refcounting instead. */
    if (jsgf->parent == NULL) {
        hash_iter_t *itor;
        gnode_t *gn;

        /* Free rule names (keys) and rule bodies (values). */
        for (itor = hash_table_iter(jsgf->rules); itor;
             itor = hash_table_iter_next(itor)) {
            ckd_free((char *)itor->ent->key);
            jsgf_rule_free((jsgf_rule_t *)itor->ent->val);
        }
        hash_table_free(jsgf->rules);

        /* Imported grammars are freed recursively. */
        for (itor = hash_table_iter(jsgf->imports); itor;
             itor = hash_table_iter_next(itor)) {
            ckd_free((char *)itor->ent->key);
            jsgf_grammar_free((jsgf_t *)itor->ent->val);
        }
        hash_table_free(jsgf->imports);

        for (gn = jsgf->searchpath; gn; gn = gnode_next(gn))
            ckd_free(gnode_ptr(gn));
        glist_free(jsgf->searchpath);

        for (gn = jsgf->links; gn; gn = gnode_next(gn))
            ckd_free(gnode_ptr(gn));
        glist_free(jsgf->links);
    }
    ckd_free(jsgf->name);
    ckd_free(jsgf->version);
    ckd_free(jsgf->charset);
    ckd_free(jsgf->locale);
    ckd_free(jsgf);
}
/*
 * Build a new word-class object for the LM.  The per-word probabilities
 * are taken from the classwords list; if they do not sum to approximately
 * one they are normalized in place before being converted to log domain.
 */
ngram_class_t *
ngram_class_new(ngram_model_t * model, int32 tag_wid, int32 start_wid,
                glist_t classwords)
{
    ngram_class_t *cls;
    gnode_t *node;
    float32 total;
    int idx;

    cls = ckd_calloc(1, sizeof(*cls));
    cls->tag_wid = tag_wid;
    /* wid_base is the wid (minus class tag) of the first word in the list. */
    cls->start_wid = start_wid;
    cls->n_words = glist_count(classwords);
    cls->prob1 = ckd_calloc(cls->n_words, sizeof(*cls->prob1));
    cls->nword_hash = NULL;
    cls->n_hash = 0;

    /* Sum the raw in-class probabilities. */
    total = 0.0;
    for (node = classwords; node; node = gnode_next(node))
        total += gnode_float32(node);

    /* Renormalize (in place) if the sum strays too far from 1.0. */
    if (total > 1.1 || total < 0.9) {
        E_INFO("Total class probability is %f, will normalize\n", total);
        for (node = classwords; node; node = gnode_next(node))
            node->data.fl /= total;
    }

    /* Convert each probability to the log domain. */
    idx = 0;
    for (node = classwords; node; node = gnode_next(node)) {
        cls->prob1[idx] = logmath_log(model->lmath, gnode_float32(node));
        ++idx;
    }

    return cls;
}
/*
 * Build an fsg_model_t from the given (parsed) JSGF rule.  The grammar's
 * link list is rebuilt by expand_rule(), then each link is translated
 * into an FSG transition: a null (epsilon) transition for rule
 * references, a weighted word transition for terminals.  If do_closure
 * is nonzero, the transitive closure of null transitions is computed on
 * the resulting model.
 */
static fsg_model_t *
jsgf_build_fsg_internal(jsgf_t * grammar, jsgf_rule_t * rule,
                        logmath_t * lmath, float32 lw, int do_closure)
{
    fsg_model_t *fsg;
    glist_t nulls;
    gnode_t *gn;
    int rule_entry, rule_exit;

    /* Clear previous links */
    for (gn = grammar->links; gn; gn = gnode_next(gn)) {
        ckd_free(gnode_ptr(gn));
    }
    glist_free(grammar->links);
    grammar->links = NULL;
    grammar->nstate = 0;

    /* Create the top-level entry state, and expand the top-level rule. */
    rule_entry = grammar->nstate++;
    rule_exit = expand_rule(grammar, rule, rule_entry, NO_NODE);

    /* If no exit-state was created, create one. */
    if (rule_exit == NO_NODE) {
        rule_exit = grammar->nstate++;
        jsgf_add_link(grammar, NULL, rule_entry, rule_exit);
    }

    fsg = fsg_model_init(rule->name, lmath, lw, grammar->nstate);
    fsg->start_state = rule_entry;
    fsg->final_state = rule_exit;

    /* Links were prepended during expansion; restore creation order. */
    grammar->links = glist_reverse(grammar->links);
    for (gn = grammar->links; gn; gn = gnode_next(gn)) {
        jsgf_link_t *link = gnode_ptr(gn);

        if (link->atom) {
            if (jsgf_atom_is_rule(link->atom)) {
                /* Rule reference: epsilon transition with the atom's weight. */
                fsg_model_null_trans_add(fsg, link->from, link->to,
                                         logmath_log(lmath,
                                                     link->atom->weight));
            }
            else {
                /* Terminal: weighted word transition. */
                int wid = fsg_model_word_add(fsg, link->atom->name);
                fsg_model_trans_add(fsg, link->from, link->to,
                                    logmath_log(lmath, link->atom->weight),
                                    wid);
            }
        }
        else {
            /* Bare link (no atom): zero-cost epsilon transition. */
            fsg_model_null_trans_add(fsg, link->from, link->to, 0);
        }
    }

    if (do_closure) {
        nulls = fsg_model_null_trans_closure(fsg, NULL);
        glist_free(nulls);
    }

    return fsg;
}
/*
 * Write a hypothesis segmentation line to fp: optional header, uttid,
 * scaling and score totals, then per-word "start-frame ascr lscr word"
 * tuples, terminated by the frame count.  Zero-width hyps (sf == ef, as
 * produced by FSG decoding) are skipped.  If unnorm is nonzero, acoustic
 * scores are un-normalized by adding back the per-frame scaling from
 * ascale.  No-op if fp is NULL.
 */
void
matchseg_write(FILE * fp, glist_t hyp, char *uttid, char *hdr,
               lm_t * lm, dict_t * dict, int32 num_frm, int32 * ascale,
               int32 unnorm)
{
    gnode_t *gn;
    srch_hyp_t *h;
    int32 ascr, lscr, scl, hypscale, global_hypscale;
    int32 i;

    if (fp == NULL)
        return;

    ascr = 0;
    lscr = 0;
    scl = 0;
    hypscale = 0;
    global_hypscale = 0;

    /* First pass: accumulate total acoustic/LM scores and global scaling. */
    for (gn = hyp; gn; gn = gnode_next(gn)) {
        h = (srch_hyp_t *) gnode_ptr(gn);
        if (h->sf != h->ef) {   /* FSG outputs zero-width hyps */
            ascr += h->ascr;
            /* Stored LM scores are scaled; convert back when we have an LM. */
            lscr += lm ? lm_rawscore(lm, h->lscr) : h->lscr;
            if (unnorm)
                global_hypscale += compute_scale(h->sf, h->ef, ascale);
        }
    }

    /* Total per-frame scaling over the utterance. */
    for (i = 0; i < num_frm; i++)
        scl += ascale[i];

    /* Summary: S = total scale, T = total score, A = acoustic, L = LM. */
    fprintf(fp, "%s%s S %d T %d A %d L %d", (hdr ? hdr : ""), uttid,
            scl, ascr + lscr + global_hypscale, ascr + global_hypscale,
            lscr);

    /* Second pass: per-word segmentation entries. */
    for (gn = hyp; gn; gn = gnode_next(gn)) {
        h = (srch_hyp_t *) gnode_ptr(gn);
        if (h->sf != h->ef) {   /* FSG outputs zero-width hyps */
            hypscale = 0;
            if (unnorm)
                hypscale += compute_scale(h->sf, h->ef, ascale);
            fprintf(fp, " %d %d %d %s", h->sf, h->ascr + hypscale,
                    lm ? lm_rawscore(lm, h->lscr) : h->lscr,
                    dict_wordstr(dict, h->id));
        }
    }

    fprintf(fp, " %d\n", num_frm);
    fflush(fp);
}
/*
 * Obtain transitive closure of NULL transitions in the given FSG.  (Initial
 * list of such transitions is given.)
 * Return value: Updated list of null transitions.
 */
static glist_t
word_fsg_null_trans_closure(word_fsg_t * fsg, glist_t nulls)
{
    gnode_t *gn1, *gn2;
    int updated;
    word_fsglink_t *tl1, *tl2;
    int32 k, n;

    E_INFO("Computing transitive closure for null transitions\n");

    /*
     * Probably not the most efficient closure implementation, in general, but
     * probably reasonably efficient for a sparse null transition matrix.
     */
    n = 0;
    do {
        updated = FALSE;
        /* For every chained pair (A->B, B->C) try to add/improve A->C. */
        for (gn1 = nulls; gn1; gn1 = gnode_next(gn1)) {
            tl1 = (word_fsglink_t *) gnode_ptr(gn1);
            assert(tl1->wid < 0);   /* null transitions carry no word label */

            for (gn2 = nulls; gn2; gn2 = gnode_next(gn2)) {
                tl2 = (word_fsglink_t *) gnode_ptr(gn2);

                if (tl1->to_state == tl2->from_state) {
                    k = word_fsg_null_trans_add(fsg,
                                                tl1->from_state,
                                                tl2->to_state,
                                                tl1->logs2prob +
                                                tl2->logs2prob);
                    if (k >= 0) {
                        /* A transition was added or improved; repeat the
                         * outer loop until a fixed point is reached. */
                        updated = TRUE;
                        if (k > 0) {
                            nulls = glist_add_ptr(nulls, (void *)
                                                  fsg->null_trans[tl1->from_state][tl2->to_state]);
                            n++;
                        }
                    }
                }
            }
        }
    } while (updated);

    E_INFO("%d null transitions added\n", n);

    return nulls;
}
/*
 * Refresh the global partial-hypothesis array (parthyp) from the current
 * Viterbi history and return the number of words copied into it.  The
 * entry after the last word has its word string freed and NULLed, acting
 * as a terminator.  If endutt is nonzero the full utterance-end backtrace
 * is used, otherwise a partial backtrace.
 */
int32 live_get_partialhyp(int32 endutt)
{
    int32 id, nwds;
    glist_t hyp;
    gnode_t *gn;
    hyp_t *h;
    dict_t *dict;

    dict = kbcore_dict (kb->kbcore);
    if (endutt)
        id = vithist_utt_end(kb->vithist, kb->kbcore);
    else
        id = vithist_partialutt_end(kb->vithist, kb->kbcore);

    if (id > 0) {
        hyp = vithist_backtrace(kb->vithist,id);

        /* Copy each backtraced word into parthyp, replacing old strings. */
        for (gn = hyp,nwds=0; gn; gn = gnode_next(gn),nwds++) {
            h = (hyp_t *) gnode_ptr (gn);
            if (parthyp[nwds].word != NULL) {
                ckd_free(parthyp[nwds].word);
                parthyp[nwds].word = NULL;
            }
            parthyp[nwds].word = strdup(dict_wordstr(dict, h->id));
            parthyp[nwds].sf = h->sf;
            parthyp[nwds].ef = h->ef;
            parthyp[nwds].ascr = h->ascr;
            parthyp[nwds].lscr = h->lscr;
        }
        /* NULL-terminate the array at index nwds. */
        if (parthyp[nwds].word != NULL){
            ckd_free(parthyp[nwds].word);
            parthyp[nwds].word = NULL;
        }

        /* Free hyplist */
        /* NOTE(review): this loop condition stops before the LAST node, so
         * the final hyp_t is never freed here -- confirm whether that node
         * is owned elsewhere; otherwise this looks like a leak. */
        for (gn = hyp; gn && (gnode_next(gn)); gn = gnode_next(gn)) {
            h = (hyp_t *) gnode_ptr (gn);
            ckd_free ((void *) h);
        }
        glist_free (hyp);
    }
    else {
        /* No backtrace available: report zero words and terminate array. */
        nwds = 0;
        if (parthyp[nwds].word != NULL) {
            ckd_free(parthyp[nwds].word);
            parthyp[nwds].word = NULL;
        }
    }

    return(nwds);
}
/*
 * Advance the arc iterator to the next transition.  Word (non-null) arcs
 * are iterated first -- within the current destination list, then across
 * destination hash entries -- before falling back to the null-transition
 * iterator.  Returns the iterator, or NULL (after freeing it) when there
 * are no more arcs.
 */
fsg_arciter_t *
fsg_arciter_next(fsg_arciter_t * itor)
{
    /* Iterate over non-null arcs first. */
    if (itor->gn) {
        itor->gn = gnode_next(itor->gn);
        /* Move to the next destination arc. */
        if (itor->gn == NULL) {
            itor->itor = hash_table_iter_next(itor->itor);
            if (itor->itor != NULL)
                itor->gn = hash_entry_val(itor->itor->ent);
            /* Word arcs exhausted: stop only if there are no null arcs
             * left to iterate. */
            else if (itor->null_itor == NULL)
                goto stop_iteration;
        }
    }
    else {
        /* Already in the null-arc phase (or word arcs were exhausted). */
        if (itor->null_itor == NULL)
            goto stop_iteration;
        itor->null_itor = hash_table_iter_next(itor->null_itor);
        if (itor->null_itor == NULL)
            goto stop_iteration;
    }
    return itor;
  stop_iteration:
    fsg_arciter_free(itor);
    return NULL;
}
/*
 * Release one reference to cmdln; free the underlying storage when the
 * reference count drops to zero.  Returns the remaining reference count
 * (0 once the object has been destroyed or when cmdln is NULL).
 */
int
cmd_ln_free_r(cmd_ln_t *cmdln)
{
    if (cmdln == NULL)
        return 0;
    if (--cmdln->refcount > 0)
        return cmdln->refcount;

    if (cmdln->ht != NULL) {
        gnode_t *item;
        int32 count;
        glist_t all;

        /* Free every stored value, then the table itself. */
        all = hash_table_tolist(cmdln->ht, &count);
        for (item = all; item; item = gnode_next(item)) {
            hash_entry_t *entry = gnode_ptr(item);
            cmd_ln_val_free((cmd_ln_val_t *)entry->val);
        }
        glist_free(all);
        hash_table_free(cmdln->ht);
        cmdln->ht = NULL;
    }

    if (cmdln->f_argv != NULL) {
        int32 idx;

        /* Free the parsed argument vector. */
        for (idx = 0; idx < cmdln->f_argc; ++idx)
            ckd_free(cmdln->f_argv[idx]);
        ckd_free(cmdln->f_argv);
        cmdln->f_argv = NULL;
        cmdln->f_argc = 0;
    }

    ckd_free(cmdln);
    return 0;
}
/*
 * Sort a list of vithist entries into descending score order using a heap.
 * Returns a newly allocated glist (caller frees with glist_free), or NULL
 * on failure.  The input list itself is never modified.
 *
 * BUGFIX: on heap_insert()/heap_pop() failure the heap (and any partially
 * built result list) was leaked; both are now released before returning.
 */
glist_t vithist_sort (glist_t vithist_list)
{
    heap_t heap;
    gnode_t *gn;
    vithist_t *h;
    glist_t vithist_new;
    int32 ret, score;

    vithist_new = NULL;
    heap = heap_new();

    /* Push every entry onto the heap keyed by its score. */
    for (gn = vithist_list; gn; gn = gnode_next(gn)) {
        h = (vithist_t *) gnode_ptr(gn);
        if (heap_insert (heap, (void *) h, h->scr) < 0) {
            E_ERROR("Panic: heap_insert() failed\n");
            heap_destroy (heap);        /* BUGFIX: was leaked */
            return NULL;
        }
    }

    /*
     * Note: The heap returns nodes with ASCENDING values; and glist_add adds new
     * nodes to the HEAD of the list.  So we get a glist in the desired
     * descending score order.
     */
    while ((ret = heap_pop (heap, (void **)(&h), &score)) > 0)
        vithist_new = glist_add_ptr (vithist_new, (void *)h);
    if (ret < 0) {
        E_ERROR("Panic: heap_pop() failed\n");
        glist_free (vithist_new);       /* BUGFIX: was leaked */
        heap_destroy (heap);            /* BUGFIX: was leaked */
        return NULL;
    }

    heap_destroy (heap);

    return vithist_new;
}
/*
 * Add an alternate-pronunciation word to the FSG: for every existing
 * transition labeled with baseword, duplicate it with the same
 * probability but labeled altword.  Returns the number of transitions
 * added, or -1 if baseword is not in the FSG vocabulary.
 */
int
fsg_model_add_alt(fsg_model_t * fsg, char const *baseword,
                  char const *altword)
{
    int i, basewid, altwid;
    int ntrans;

    /* FIXME: This will get slow, eventually... */
    for (basewid = 0; basewid < fsg->n_word; ++basewid)
        if (0 == strcmp(fsg->vocab[basewid], baseword))
            break;
    if (basewid == fsg->n_word) {
        E_ERROR("Base word %s not present in FSG vocabulary!\n", baseword);
        return -1;
    }
    altwid = fsg_model_word_add(fsg, altword);
    /* Record altwid in the alternate-word bit vector (lazily allocated). */
    if (fsg->altwords == NULL)
        fsg->altwords = bitvec_alloc(fsg->n_word_alloc);
    bitvec_set(fsg->altwords, altwid);

    E_DEBUG(2,("Adding alternate word transitions (%s,%s) to FSG\n",
               baseword, altword));

    /* Look for all transitions involving baseword and duplicate them. */
    /* FIXME: This will also get slow, eventually... */
    ntrans = 0;
    for (i = 0; i < fsg->n_state; ++i) {
        hash_iter_t *itor;

        if (fsg->trans[i].trans == NULL)
            continue;
        for (itor = hash_table_iter(fsg->trans[i].trans); itor;
             itor = hash_table_iter_next(itor)) {
            glist_t trans;
            gnode_t *gn;

            trans = hash_entry_val(itor->ent);
            for (gn = trans; gn; gn = gnode_next(gn)) {
                fsg_link_t *fl = gnode_ptr(gn);
                if (fl->wid == basewid) {
                    fsg_link_t *link;

                    /* Create transition object */
                    link = listelem_malloc(fsg->link_alloc);
                    link->from_state = fl->from_state;
                    link->to_state = fl->to_state;
                    link->logs2prob = fl->logs2prob;    /* FIXME!!!??? */
                    link->wid = altwid;
                    trans = glist_add_ptr(trans, (void *) link);
                    ++ntrans;
                }
            }
            /* The list head may have changed; write it back into the table. */
            hash_entry_val(itor->ent) = trans;
        }
    }

    E_DEBUG(2,("Added %d alternate word transitions\n", ntrans));
    return ntrans;
}
/*
 * Completely deallocate a word FSG: every transition list, the context
 * table, the transition matrices, context lists and the structure itself.
 */
void
word_fsg_free(word_fsg_t * fsg)
{
    int32 src, dst;

    for (src = 0; src < fsg->n_state; src++) {
        for (dst = 0; dst < fsg->n_state; dst++) {
            gnode_t *node;

            /* Free all non-null transitions between states src and dst. */
            for (node = fsg->trans[src][dst]; node; node = gnode_next(node)) {
                word_fsglink_t *arc = (word_fsglink_t *) gnode_ptr(node);
                ckd_free((void *) arc);
            }
            glist_free(fsg->trans[src][dst]);

            /* Free any null transition src->dst. */
            ckd_free((void *) fsg->null_trans[src][dst]);
        }
    }

    ctxt_table_free(fsg->ctxt);
    ckd_free_2d((void **) fsg->trans);
    ckd_free_2d((void **) fsg->null_trans);
    ckd_free((void *) fsg->name);

    if (fsg->lc)
        ckd_free_2d((void **) fsg->lc);
    if (fsg->rc)
        ckd_free_2d((void **) fsg->rc);

    ckd_free((void *) fsg);
}
/*
 * Record a keyphrase detection.  If an overlapping detection of the same
 * keyphrase already exists, keep only the higher-probability one (the
 * existing entry is updated in place); otherwise a new entry is appended.
 * NOTE: the keyphrase pointer is stored, not copied.
 */
void
kws_detections_add(kws_detections_t *detections, const char* keyphrase,
                   int sf, int ef, int prob, int ascr)
{
    gnode_t *node;
    kws_detection_t *entry;

    for (node = detections->detect_list; node; node = gnode_next(node)) {
        kws_detection_t *existing = (kws_detection_t *)gnode_ptr(node);
        int same_phrase = (strcmp(keyphrase, existing->keyphrase) == 0);
        int overlaps = (existing->sf < ef && existing->ef > sf);

        if (same_phrase && overlaps) {
            /* Overlapping detection: retain the better-scoring one. */
            if (existing->prob < prob) {
                existing->sf = sf;
                existing->ef = ef;
                existing->prob = prob;
                existing->ascr = ascr;
            }
            return;
        }
    }

    /* Nothing found */
    entry = (kws_detection_t *)ckd_calloc(1, sizeof(*entry));
    entry->sf = sf;
    entry->ef = ef;
    entry->keyphrase = keyphrase;
    entry->prob = prob;
    entry->ascr = ascr;
    detections->detect_list = glist_add_ptr(detections->detect_list, entry);
}
/*
 * Write the hypothesis (real words only: fillers and the sentence
 * start/end markers are excluded) to fp in match-file format:
 * "<words> (<uttid>)".  No-op if fp is NULL.
 */
void
match_write(FILE * fp, glist_t hyp, char *uttid, dict_t * dict, char *hdr)
{
    gnode_t *node;
    int n_nonzero = 0;

    if (fp == NULL)
        return;

    if (hyp == NULL)            /* Following s3.0 convention */
        fprintf(fp, "(null)");

    fprintf(fp, "%s", (hdr ? hdr : ""));

    for (node = hyp; node; node = gnode_next(node)) {
        srch_hyp_t *h = (srch_hyp_t *) gnode_ptr(node);

        /* FSG outputs zero-width hyps; skip those entirely. */
        if (h->sf == h->ef)
            continue;

        /* Print only real words: no fillers, no <s> / </s>. */
        if (!dict_filler_word(dict, h->id)
            && h->id != dict_finishwid(dict)
            && h->id != dict_startwid(dict))
            fprintf(fp, "%s ",
                    dict_wordstr(dict, dict_basewid(dict, h->id)));

        n_nonzero++;
    }

    /* Keep the output well-formed when nothing was printed. */
    if (n_nonzero == 0)
        fprintf(fp, " ");

    fprintf(fp, "(%s)\n", uttid);
    fflush(fp);
}
/*
 * Add the given transition to the FSG transition matrix.  Duplicates
 * (two transitions between the same states with the same word label) are
 * merged: only the highest probability is retained.
 */
static void
word_fsg_trans_add(word_fsg_t * fsg, int32 from, int32 to,
                   int32 logp, int32 wid)
{
    word_fsglink_t *arc;
    gnode_t *node;

    /* If a link with this word label already exists, keep the best prob. */
    for (node = fsg->trans[from][to]; node; node = gnode_next(node)) {
        arc = (word_fsglink_t *) gnode_ptr(node);
        if (arc->wid == wid) {
            if (arc->logs2prob < logp)
                arc->logs2prob = logp;
            return;
        }
    }

    /* No duplicate: create a fresh transition object and prepend it. */
    arc = (word_fsglink_t *) ckd_calloc(1, sizeof(word_fsglink_t));
    arc->from_state = from;
    arc->to_state = to;
    arc->logs2prob = logp;
    arc->wid = wid;

    fsg->trans[from][to] = glist_add_ptr(fsg->trans[from][to], (void *) arc);
}
/*
 * Add a word transition from -> to with log-probability logp.  Duplicate
 * transitions (same states, same word id) are merged, keeping the higher
 * probability.  The per-state transition hash table is created lazily.
 */
void
fsg_model_trans_add(fsg_model_t * fsg, int32 from, int32 to,
                    int32 logp, int32 wid)
{
    fsg_link_t *arc;
    glist_t arcs;
    gnode_t *node;

    /* Lazily create the per-state destination hash table. */
    if (fsg->trans[from].trans == NULL)
        fsg->trans[from].trans = hash_table_new(5, HASH_CASE_YES);

    /* Duplicate check: same (from, to, wid) keeps only the best score. */
    arcs = fsg_model_trans(fsg, from, to);
    for (node = arcs; node; node = gnode_next(node)) {
        arc = (fsg_link_t *) gnode_ptr(node);
        if (arc->wid == wid) {
            if (arc->logs2prob < logp)
                arc->logs2prob = logp;
            return;
        }
    }

    /* Create transition object */
    arc = listelem_malloc(fsg->link_alloc);
    arc->from_state = from;
    arc->to_state = to;
    arc->logs2prob = logp;
    arc->wid = wid;

    /* Prepend to the list and store it back under the destination key.
     * NOTE: the hash key is the to_state field inside the link itself. */
    arcs = glist_add_ptr(arcs, (void *) arc);
    hash_table_replace_bkey(fsg->trans[from].trans,
                            (char const *) &arc->to_state,
                            sizeof(arc->to_state), arcs);
}
/*
 * Build an fsg_model_t from the given JSGF rule (variant in which
 * expand_rule() records the entry/exit state numbers in the rule itself
 * rather than returning them).  Each rebuilt link becomes a null
 * transition (rule reference) or a weighted word transition (terminal);
 * if do_closure is nonzero the null-transition closure is computed.
 */
static fsg_model_t *
jsgf_build_fsg_internal(jsgf_t *grammar, jsgf_rule_t *rule,
                        logmath_t *lmath, float32 lw, int do_closure)
{
    fsg_model_t *fsg;
    glist_t nulls;
    gnode_t *gn;

    /* Clear previous links */
    for (gn = grammar->links; gn; gn = gnode_next(gn)) {
        ckd_free(gnode_ptr(gn));
    }
    glist_free(grammar->links);
    grammar->links = NULL;
    rule->entry = rule->exit = 0;
    grammar->nstate = 0;
    expand_rule(grammar, rule);

    fsg = fsg_model_init(rule->name, lmath, lw, grammar->nstate);
    fsg->start_state = rule->entry;
    fsg->final_state = rule->exit;

    /* Links were prepended during expansion; restore creation order. */
    grammar->links = glist_reverse(grammar->links);
    for (gn = grammar->links; gn; gn = gnode_next(gn)) {
        jsgf_link_t *link = gnode_ptr(gn);

        if (link->atom) {
            if (jsgf_atom_is_rule(link->atom)) {
                /* Rule reference: epsilon transition with the atom's weight. */
                fsg_model_null_trans_add(fsg, link->from, link->to,
                                         logmath_log(lmath,
                                                     link->atom->weight));
            }
            else {
                /* Terminal: weighted word transition. */
                int wid = fsg_model_word_add(fsg, link->atom->name);
                fsg_model_trans_add(fsg, link->from, link->to,
                                    logmath_log(lmath, link->atom->weight),
                                    wid);
            }
        }
        else {
            /* Bare link (no atom): zero-cost epsilon transition. */
            fsg_model_null_trans_add(fsg, link->from, link->to, 0);
        }
    }

    if (do_closure) {
        nulls = fsg_model_null_trans_closure(fsg, NULL);
        glist_free(nulls);
    }

    return fsg;
}
/*
 * Free every renormalization record held by the phone-loop search and
 * reset the list to empty.
 */
static void
phone_loop_search_free_renorm(phone_loop_search_t *pls)
{
    gnode_t *node = pls->renorm;

    /* Release each record, then the list container itself. */
    while (node != NULL) {
        ckd_free(gnode_ptr(node));
        node = gnode_next(node);
    }
    glist_free(pls->renorm);
    pls->renorm = NULL;
}
/*
 * Construct the merged vocabulary for a model set and the word-ID mapping
 * from each merged word to its id in every submodel.  Also (re)initializes
 * the base model's unigram storage and its master wid hash.
 */
static void
build_widmap(ngram_model_t * base, logmath_t * lmath, int32 n)
{
    ngram_model_set_t *set = (ngram_model_set_t *) base;
    ngram_model_t **models = set->lms;
    hash_table_t *vocab;
    glist_t hlist;
    gnode_t *gn;
    int32 i;

    /* Construct a merged vocabulary and a set of word-ID mappings. */
    vocab = hash_table_new(models[0]->n_words, FALSE);
    /* Create the set of merged words. */
    for (i = 0; i < set->n_models; ++i) {
        int32 j;
        for (j = 0; j < models[i]->n_words; ++j) {
            /* Ignore collisions. */
            (void) hash_table_enter_int32(vocab, models[i]->word_str[j], j);
        }
    }

    /* Create the array of words, then sort it. */
    /* Ensure <UNK> is present (added only if the lookup failed). */
    if (hash_table_lookup(vocab, "<UNK>", NULL) != 0)
        (void) hash_table_enter_int32(vocab, "<UNK>", 0);
    /* Now we know the number of unigrams, initialize the base model. */
    ngram_model_init(base, &ngram_model_set_funcs, lmath, n,
                     hash_table_inuse(vocab));
    base->writable = FALSE; /* We will reuse the pointers from the submodels. */
    i = 0;
    hlist = hash_table_tolist(vocab, NULL);
    for (gn = hlist; gn; gn = gnode_next(gn)) {
        hash_entry_t *ent = gnode_ptr(gn);
        base->word_str[i++] = (char *) ent->key;
    }
    glist_free(hlist);
    qsort(base->word_str, base->n_words, sizeof(*base->word_str),
          my_compare);

    /* Now create the word ID mappings. */
    if (set->widmap)
        ckd_free_2d((void **) set->widmap);
    set->widmap = (int32 **) ckd_calloc_2d(base->n_words, set->n_models,
                                           sizeof(**set->widmap));
    for (i = 0; i < base->n_words; ++i) {
        int32 j;
        /* Also create the master wid mapping. */
        (void) hash_table_enter_int32(base->wid, base->word_str[i], i);
        /* printf("%s: %d => ", base->word_str[i], i); */
        for (j = 0; j < set->n_models; ++j) {
            set->widmap[i][j] = ngram_wid(models[j], base->word_str[i]);
            /* printf("%d ", set->widmap[i][j]); */
        }
        /* printf("\n"); */
    }
    hash_table_free(vocab);
}
/*
 * Look up a named search module in the decoder's search list.
 * Returns the matching search, or NULL if none has that name.
 */
static ps_search_t *
ps_find_search(ps_decoder_t *ps, char const *name)
{
    gnode_t *node;

    for (node = ps->searches; node; node = gnode_next(node)) {
        ps_search_t *search = (ps_search_t *)gnode_ptr(node);
        if (strcmp(ps_search_name(search), name) == 0)
            return search;
    }
    return NULL;
}
/*
 * Load a new pronunciation dictionary (and optionally a filler dictionary)
 * into a running decoder.  A scratch configuration is used so a load
 * failure leaves the decoder's existing dictionary untouched.  On success
 * the decoder's config, dict and dict2pid are swapped in and every active
 * search is reinitialized.  Returns 0 on success, negative on failure.
 * NOTE(review): the "format" parameter is currently unused here.
 */
int
ps_load_dict(ps_decoder_t *ps, char const *dictfile,
             char const *fdictfile, char const *format)
{
    cmd_ln_t *newconfig;
    dict2pid_t *d2p;
    dict_t *dict;
    gnode_t *gn;
    int rv;

    /* Create a new scratch config to load this dict (so existing one
     * won't be affected if it fails) */
    newconfig = cmd_ln_init(NULL, ps_args(), TRUE, NULL);
    cmd_ln_set_boolean_r(newconfig, "-dictcase",
                         cmd_ln_boolean_r(ps->config, "-dictcase"));
    cmd_ln_set_str_r(newconfig, "-dict", dictfile);
    if (fdictfile)
        cmd_ln_set_str_r(newconfig, "-fdict", fdictfile);
    else
        cmd_ln_set_str_r(newconfig, "-fdict",
                         cmd_ln_str_r(ps->config, "-fdict"));

    /* Try to load it. */
    if ((dict = dict_init(newconfig, ps->acmod->mdef)) == NULL) {
        cmd_ln_free_r(newconfig);
        return -1;
    }

    /* Reinit the dict2pid. */
    if ((d2p = dict2pid_build(ps->acmod->mdef, dict)) == NULL) {
        cmd_ln_free_r(newconfig);
        return -1;
    }

    /* Success!  Update the existing config to reflect new dicts and
     * drop everything into place. */
    cmd_ln_free_r(newconfig);
    cmd_ln_set_str_r(ps->config, "-dict", dictfile);
    if (fdictfile)
        cmd_ln_set_str_r(ps->config, "-fdict", fdictfile);
    dict_free(ps->dict);
    ps->dict = dict;
    dict2pid_free(ps->d2p);
    ps->d2p = d2p;

    /* And tell all searches to reconfigure themselves. */
    for (gn = ps->searches; gn; gn = gnode_next(gn)) {
        ps_search_t *search = gnode_ptr(gn);
        if ((rv = ps_search_reinit(search, dict, d2p)) < 0)
            return rv;
    }

    return 0;
}
/*
 * Read a class definition file and add each defined class to the language
 * model.  Returns 0 on success, -1 on failure.  All temporary storage
 * (classdef table, key strings, classdef objects) is released in either
 * case.
 */
int32
ngram_model_read_classdef(ngram_model_t *model, const char *file_name)
{
    hash_table_t *classes;
    glist_t hl = NULL;
    gnode_t *gn;
    int32 rv = -1;

    classes = hash_table_new(0, FALSE);
    if (read_classdef_file(classes, file_name) < 0) {
        hash_table_free(classes);
        return -1;
    }

    /* Create a new class in the language model for each classdef. */
    hl = hash_table_tolist(classes, NULL);
    for (gn = hl; gn; gn = gnode_next(gn)) {
        hash_entry_t *he = gnode_ptr(gn);
        classdef_t *classdef = he->val;

        if (ngram_model_add_class(model, he->key, 1.0,
                                  classdef->words, classdef->weights,
                                  classdef->n_words) < 0)
            goto error_out;
    }
    rv = 0;

error_out:
    /* Clean up the classdef storage whether or not we succeeded. */
    for (gn = hl; gn; gn = gnode_next(gn)) {
        hash_entry_t *he = gnode_ptr(gn);

        ckd_free((char *)he->key);
        classdef_free(he->val);
    }
    glist_free(hl);
    hash_table_free(classes);
    return rv;
}
/*
 * Discard all recorded detections: free each record, then the list, and
 * leave the detection list empty.  Safe to call when already empty.
 */
void
kws_detections_reset(kws_detections_t *detections)
{
    gnode_t *node;

    if (detections->detect_list == NULL)
        return;

    /* Free every detection record before releasing the list nodes. */
    for (node = detections->detect_list; node; node = gnode_next(node))
        ckd_free(gnode_ptr(node));

    glist_free(detections->detect_list);
    detections->detect_list = NULL;
}
/*
 * Recursively free a right-hand side: its chain of alternatives, each of
 * its atoms, and the rhs structure itself.  NULL is accepted and ignored.
 */
static void
jsgf_rhs_free(jsgf_rhs_t *rhs)
{
    gnode_t *node;

    if (rhs == NULL)
        return;

    /* Free the next alternative in the chain first. */
    jsgf_rhs_free(rhs->alt);

    for (node = rhs->atoms; node; node = gnode_next(node))
        jsgf_atom_free(gnode_ptr(node));
    glist_free(rhs->atoms);

    ckd_free(rhs);
}
/*
 * Release every registered search module and clear both the search list
 * and the active-search pointer.  Safe to call when no searches exist.
 */
static void
ps_free_searches(ps_decoder_t *ps)
{
    gnode_t *node;

    if (ps->searches == NULL)
        return;

    /* Free each module, then the list container itself. */
    for (node = ps->searches; node; node = gnode_next(node))
        ps_search_free(gnode_ptr(node));
    glist_free(ps->searches);

    ps->searches = NULL;
    ps->search = NULL;
}
/*
 * Build a space-separated string of all keyphrases detected up to
 * (frame - delay).  Returns a newly allocated string (caller frees), or
 * NULL when there is nothing to report.
 */
char *
kws_detections_hyp_str(kws_detections_t *detections, int frame, int delay)
{
    gnode_t *node;
    char *result, *pos;
    int total = 0;

    /* First pass: total length of reportable keyphrases, plus one byte
     * each for a separating space / the final terminator. */
    for (node = detections->detect_list; node; node = gnode_next(node)) {
        kws_detection_t *det = (kws_detection_t *)gnode_ptr(node);
        if (det->ef < frame - delay)
            total += strlen(det->keyphrase) + 1;
    }

    if (total == 0)
        return NULL;            /* nothing detected far enough back */

    /* Second pass: concatenate keyphrases separated by single spaces. */
    result = (char *)ckd_calloc(total, sizeof(char));
    pos = result;
    for (node = detections->detect_list; node; node = gnode_next(node)) {
        kws_detection_t *det = (kws_detection_t *)gnode_ptr(node);
        size_t klen;

        if (det->ef >= frame - delay)
            continue;
        klen = strlen(det->keyphrase);
        memcpy(pos, det->keyphrase, klen);
        pos += klen;
        *pos++ = ' ';
    }

    /* Replace the trailing space with the NUL terminator. */
    if (pos > result) {
        pos--;
        *pos = '\0';
    }
    return result;
}
/*
 * Find the unigram ("word/phone" entry) whose word part prefix-matches
 * word_grapheme (starting at word_offset) with the highest n-gram
 * probability given history_list.  Returns the winning wid plus the
 * matched word/phone lengths.
 *
 * BUGFIX: winner was returned UNINITIALIZED (undefined behavior) when no
 * unigram matched; it is now zero-initialized so callers never read
 * indeterminate values.  The redundant NULL guard before freeing history
 * was also removed (ckd_calloc's result is never NULL here).
 */
struct winner_t
dict_get_winner_wid(ngram_model_t *model, const char * word_grapheme,
                    glist_t history_list, const int32 total_unigrams,
                    int word_offset)
{
    int32 current_prob = -2147483647;
    struct winner_t winner = {0};   /* BUGFIX: zero-init (was garbage when
                                     * nothing matched) */
    int32 i = 0, j = 0;
    int nused;
    int32 ngram_order = ngram_model_get_size(model);
    int32 *history = ckd_calloc((size_t)ngram_order + 1, sizeof(int32));
    gnode_t *gn;
    const char *vocab;
    const char *sub;
    int32 prob;
    unigram_t unigram;

    /* Copy up to ngram_order history word-ids, most recent first. */
    for (gn = history_list; gn; gn = gnode_next(gn)) {
        history[ngram_order - j] = gnode_int32(gn);
        j++;
        if (j >= ngram_order)
            break;
    }

    for (i = 0; i < total_unigrams; i++) {
        vocab = ngram_word(model, i);
        unigram = dict_split_unigram(vocab);
        sub = word_grapheme + word_offset;
        if (dict_starts_with(unigram.word, sub)) {
            prob = ngram_ng_prob(model, i, history, j, &nused);
            if (current_prob < prob) {
                current_prob = prob;
                winner.winner_wid = i;
                winner.length_match = strlen(unigram.word);
                winner.len_phoneme = strlen(unigram.phone);
            }
        }
        /* dict_split_unigram allocates; release its parts each iteration. */
        if (unigram.word)
            ckd_free(unigram.word);
        if (unigram.phone)
            ckd_free(unigram.phone);
    }

    ckd_free(history);

    return winner;
}
/*
 * Depth-first marking starting at _state: set its mark to 1, then recurse
 * on every still-unvisited (_marks == -1) predecessor found in the
 * adjacency list whose transition leads into _state.
 */
static void
mark_dead_state(s2_fsg_t *_fsg, int _state, int *_marks, glist_t *_adj)
{
    glist_t node;

    assert(_fsg != NULL);

    _marks[_state] = 1;
    for (node = _adj[_state]; node; node = gnode_next(node)) {
        s2_fsg_trans_t *arc = (s2_fsg_trans_t *)gnode_ptr(node);
        if (arc->to_state == _state && _marks[arc->from_state] == -1)
            mark_dead_state(_fsg, arc->from_state, _marks, _adj);
    }
}
/*
 * Compress the per-phone senone sequences into a table of unique
 * sequences: m->sseq is replaced with the compacted table and each
 * phone's sequence id is recorded in m->phone[p].ssid.
 */
static void
sseq_compress(mdef_t * m)
{
    hash_table_t *h;
    s3senid_t **sseq;
    int32 n_sseq;
    int32 p, j, k;
    glist_t g;
    gnode_t *gn;
    hash_entry_t *he;

    /* Size in bytes of one senone sequence (used as the hash key length). */
    k = m->n_emit_state * sizeof(s3senid_t);
    h = hash_table_new(m->n_phone, HASH_CASE_YES);
    n_sseq = 0;

    /* Identify unique senone-sequence IDs.  BUG: tmat-id not being considered!! */
    for (p = 0; p < m->n_phone; p++) {
        /* Add senone sequence to hash table; an existing entry returns its
         * previously assigned id, a new one is assigned id n_sseq. */
        if ((j = (long) hash_table_enter_bkey(h, (char *) (m->sseq[p]), k,
                                              (void *)(long)n_sseq)) == n_sseq)
            n_sseq++;
        m->phone[p].ssid = j;
    }

    /* Generate compacted sseq table */
    sseq = (s3senid_t **) ckd_calloc_2d(n_sseq, m->n_emit_state,
                                        sizeof(s3senid_t)); /* freed in mdef_free() */

    g = hash_table_tolist(h, &j);
    assert(j == n_sseq);

    for (gn = g; gn; gn = gnode_next(gn)) {
        he = (hash_entry_t *) gnode_ptr(gn);
        j = (int32)(long)hash_entry_val(he);
        memcpy(sseq[j], hash_entry_key(he), k);
    }
    glist_free(g);

    /* Free the old, temporary senone sequence table, replace with compacted one */
    ckd_free_2d((void **) m->sseq);
    m->sseq = sseq;
    m->n_sseq = n_sseq;

    hash_table_free(h);
}