Example #1
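
The first example appears to be cst_spamf0() from Flite's clustergen code (its "SPAM F0" statistical phrase/accent F0 model). It builds a one-channel F0 track in two passes: a Segment pass predicts a phrase-level F0 value per segment (zero during pauses) from the phrase CART tree and fills the matching frames, and a Syllable pass classifies each syllable with the accent CART tree and adds the corresponding accent shape with cst_synthtilt(). The finished contour is then copied into channel 0 of the utterance's param_track.
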
cst_utterance *cst_spamf0(cst_utterance *utt)
{
    cst_track *spamf0_track = NULL;
    cst_track *param_track = NULL;
    cst_item *s;
    cst_cg_db *cg_db;
    const cst_cart *acc_tree, *phrase_tree;
    float end, f0val, syldur;
    int num_frames, f, i;
    cg_db = val_cg_db(utt_feat_val(utt, "cg_db"));

    /* One-channel track holding the predicted F0 value for each frame */
    spamf0_track = new_track();
    cst_track_resize(spamf0_track,
                     (utt_feat_int(utt, "param_track_num_frames")), 1);
    acc_tree = cg_db->spamf0_accent_tree;
    phrase_tree = cg_db->spamf0_phrase_tree;
    end = 0.0;
    num_frames = 0;
    for (s = utt_rel_head(utt, "Segment"); s; s = item_next(s))
    {
        end = ffeature_float(s, "end");
        if (cst_streq("pau", ffeature_string(s, "name")))
        {
            f0val = 0;
        }
        else
        {
            f0val = val_float(cart_interpret(s, phrase_tree));
        }

        for (;
             ((num_frames * cg_db->frame_advance) <= end)
             && (num_frames < utt_feat_int(utt, "param_track_num_frames"));
             num_frames++)
        {
            spamf0_track->frames[num_frames][0] = f0val;
        }
    }

    for (s = utt_rel_head(utt, "Syllable"); s; s = item_next(s))
    {
        f = val_int(cart_interpret(s, acc_tree));
        syldur = ffeature_float(s, "R:SylStructure.daughtern.R:Segment.end")
            - ffeature_float(s, "R:SylStructure.daughter1.R:Segment.p.end");
        cst_synthtilt(cg_db,
                      ffeature_float(s,
                                     "R:SylStructure.daughter1.R:Segment.p.end"),
                      cg_db->spamf0_accent_vectors[f][0],
                      cg_db->spamf0_accent_vectors[f][2], syldur,
                      cg_db->spamf0_accent_vectors[f][6], spamf0_track);
    }
    /* Copy the finished contour into channel 0 (F0) of the parameter track */
    param_track = val_track(utt_feat_val(utt, "param_track"));
    for (i = 0; i < utt_feat_int(utt, "param_track_num_frames"); i++)
    {
        param_track->frames[i][0] = spamf0_track->frames[i][0];
    }
    delete_track(spamf0_track);
    return utt;
}
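
As a side note, the Segment loop above reduces to a piecewise-constant fill driven by each segment's end time and the frame advance. Below is a minimal standalone sketch of just that indexing; the frame advance, end times and F0 values are made-up illustration data, not anything taken from a real cg_db:

#include <stdio.h>

int main(void)
{
    /* Made-up data: a 5 ms frame advance (standing in for cg_db->frame_advance),
     * three segments with their end times and predicted phrase F0 values
     * (0.0 marks a pause, as in cst_spamf0). */
    const float frame_advance = 0.005f;
    const float seg_end[3] = { 0.030f, 0.050f, 0.080f };
    const float seg_f0[3]  = { 0.0f, 110.0f, 95.0f };
    const int num_track_frames = 16;
    float f0_track[16];
    int num_frames = 0, s;

    /* Same indexing as the Segment loop: keep writing the current segment's
     * value until the frame's time passes the segment's end time. */
    for (s = 0; s < 3; s++)
        for (; ((num_frames * frame_advance) <= seg_end[s])
               && (num_frames < num_track_frames); num_frames++)
            f0_track[num_frames] = seg_f0[s];

    for (s = 0; s < num_frames; s++)
        printf("frame %2d (t=%.3fs): %5.1f\n", s, s * frame_advance, f0_track[s]);
    return 0;
}
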
Example #2
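
The second example appears to be cg_predict_params() from the same clustergen code. For each item in the "mcep" relation it selects the CART trees that match the unit's type name, predicts F0 and a spectral cluster index, and copies the cluster's parameters into the output track; with MLPG enabled (fff == 1) the standard deviations are kept alongside the means, otherwise (fff == 2) only the means are copied. It optionally fills a five-channel mixed-excitation strength track, stores each cluster's average voicing on the item, smooths F0 with cg_smooth_F0(), and finally attaches the resulting tracks to the utterance.
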
static cst_utterance *cg_predict_params(cst_utterance *utt)
{
    cst_cg_db *cg_db;
    cst_track *param_track;
    cst_track *str_track = NULL;
    cst_item *mcep;
    const cst_cart *mcep_tree, *f0_tree;
    int i,j,f,p,fd,o;
    const char *mname;
    float f0_val;
    int fff;
    int extra_feats = 0;

    cg_db = val_cg_db(utt_feat_val(utt,"cg_db"));
    param_track = new_track();
    if (cg_db->do_mlpg) /* which should be the default */
        fff = 1;  /* copy details with stddevs */
    else
        fff = 2;  /* copy details without stddevs */

    extra_feats = 1;  /* voicing */
    if (cg_db->mixed_excitation)
    {
        extra_feats += 5;
        str_track = new_track();
        cst_track_resize(str_track,
                         utt_feat_int(utt,"param_track_num_frames"),
                         5);
    }
    
    cst_track_resize(param_track,
                     utt_feat_int(utt,"param_track_num_frames"),
                     (cg_db->num_channels0/fff)-
                       (2 * extra_feats));/* no voicing or str */
    /* Each item in the "mcep" relation produces one frame of the track */
    for (i=0,mcep=utt_rel_head(utt,"mcep"); mcep; i++,mcep=item_next(mcep))
    {
        mname = item_feat_string(mcep,"name");
        for (p=0; cg_db->types[p]; p++)
            if (cst_streq(mname,cg_db->types[p]))
                break;
        if (cg_db->types[0] == NULL)
            p=0; /* if there isn't a matching tree, use the first one */

        /* Predict F0 */
        f0_tree = cg_db->f0_trees[p];
        f0_val = val_float(cart_interpret(mcep,f0_tree));
        param_track->frames[i][0] = f0_val;
        /* what about stddev ? */

        if (cg_db->multimodel)
        {   /* MULTI model */
            f = val_int(cart_interpret(mcep,cg_db->param_trees0[p]));
            fd = val_int(cart_interpret(mcep,cg_db->param_trees1[p]));
            item_set_int(mcep,"clustergen_param_frame",f);

            param_track->frames[i][0] = 
                (param_track->frames[i][0]+
                 CG_MODEL_VECTOR(cg_db,model_vectors0,f,0)+
                 CG_MODEL_VECTOR(cg_db,model_vectors1,fd,0))/3.0;
            for (j=2; j<param_track->num_channels; j++)
                param_track->frames[i][j] = 
                    (CG_MODEL_VECTOR(cg_db,model_vectors0,f,(j)*fff)+
                     CG_MODEL_VECTOR(cg_db,model_vectors1,fd,(j)*fff))/2.0;
            if (cg_db->mixed_excitation)
            {
                o = j;
                for (j=0; j<5; j++)
                {
                    str_track->frames[i][j] =
                        (CG_MODEL_VECTOR(cg_db,model_vectors0,f,(o+(2*j))*fff)+
                         CG_MODEL_VECTOR(cg_db,model_vectors1,fd,(o+(2*j))*fff))/2.0;
                }
            }
        }
        else  
        {   /* SINGLE model */
            /* Predict Spectral */
            mcep_tree = cg_db->param_trees0[p];
            f = val_int(cart_interpret(mcep,mcep_tree));
            item_set_int(mcep,"clustergen_param_frame",f);

            param_track->frames[i][0] = 
                (param_track->frames[i][0]+
                 CG_MODEL_VECTOR(cg_db,model_vectors0,f,0))/2.0;

            for (j=2; j<param_track->num_channels; j++)
                param_track->frames[i][j] =
                    CG_MODEL_VECTOR(cg_db,model_vectors0,f,(j)*fff);

            if (cg_db->mixed_excitation)
            {
                o = j;
                for (j=0; j<5; j++)
                {
                    str_track->frames[i][j] =
                        CG_MODEL_VECTOR(cg_db,model_vectors0,f,(o+(2*j))*fff);
                }
            }
        }

        /* last coefficient is average voicing for cluster */
        item_set_float(mcep,"voicing",
                       CG_MODEL_VECTOR(cg_db,model_vectors0,f,
                                       cg_db->num_channels0-2));

        param_track->times[i] = i * cg_db->frame_advance;
    }

    cg_smooth_F0(utt,cg_db,param_track);

    utt_set_feat(utt,"param_track",track_val(param_track));
    if (cg_db->mixed_excitation)
        utt_set_feat(utt,"str_track",track_val(str_track));

    return utt;
}
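
As a side note, the fff value above acts as a stride into cluster vectors whose channels appear to be stored as interleaved mean/stddev pairs: stride 1 keeps both (so MLPG can use the variances), stride 2 picks out only the means. Below is a minimal standalone sketch of that indexing; the cluster vector is made-up illustration data, not a real model vector:

#include <stdio.h>

/* Copy one frame's worth of channels out of a cluster vector stored as
 * interleaved [mean0, stddev0, mean1, stddev1, ...]: stride 1 copies the
 * interleaved layout as-is (means and stddevs), stride 2 picks only means. */
static void copy_frame(const float *cluster, float *frame,
                       int num_channels, int stride)
{
    int j;
    for (j = 0; j < num_channels; j++)
        frame[j] = cluster[j * stride];
}

int main(void)
{
    /* Hypothetical cluster vector: 3 channels, mean/stddev interleaved. */
    const float cluster[6] = { 5.0f, 0.5f, 4.0f, 0.4f, 3.0f, 0.3f };
    float with_stddevs[6];   /* what the do_mlpg path keeps (fff == 1) */
    float means_only[3];     /* what the non-MLPG path keeps (fff == 2) */
    int j;

    copy_frame(cluster, with_stddevs, 6, 1);
    copy_frame(cluster, means_only, 3, 2);

    for (j = 0; j < 3; j++)
        printf("channel %d: mean %.1f, stddev %.1f\n",
               j, means_only[j], with_stddevs[2 * j + 1]);
    return 0;
}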