/* HMM_logL -
 *  Given an HMM and a vector of log likelihoods for states assigned to
 *  data vectors in the sequence, returns the log likelihood of the data
 *  given the HMM.
 */
Real HMM_logL(Hmm *m, RVec *logL)
{ int u, tt = VLENGTH(logL[0]);
  Real l = NEGATIVE_INFINITY;
  /* P(sequence|m) = sum(u = 1 ... m->uu, alpha[u][tt]) */
  HMM_calcAlphas(m, logL);
  for (u = 0; u<m->uu; u++) l = add_exp(l, MATRIX(g_alpha)[u][tt-1]);
  return l;}
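/* Not part of the original listing: a minimal sketch of the log-space helper
 * that HMM_logL (and the functions below) rely on, assuming add_exp(x,y)
 * returns log(exp(x)+exp(y)) computed stably and that NEGATIVE_INFINITY
 * stands for the log of zero.  Real, NEGATIVE_INFINITY, and add_exp are
 * defined elsewhere in this code base; the definitions below are
 * illustrative stand-ins only (they require <math.h>).
 */
#if 0	/* illustrative sketch, not the project's actual definitions */
typedef double Real;
#define NEGATIVE_INFINITY (-HUGE_VAL)
static Real add_exp(Real x, Real y)
{ /* log(exp(x)+exp(y)) without overflow: factor out the larger argument */
  if (x==NEGATIVE_INFINITY) return y;
  if (y==NEGATIVE_INFINITY) return x;
  return (x>y)?x+log1p(exp(y-x)):y+log1p(exp(x-y));}
#endif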
/* HMM_calcGammas -
 *  Given an HMM and a vector of log likelihoods for states assigned to
 *  data vectors in the sequence, computes the gammas: the probabilities
 *  (left in log space here) that each frame is in each state.
 */
void HMM_calcGammas(Hmm *m, RVec *logL, RVec *gamma) {
  int t, u, v, tt = VLENGTH(logL[0]);
  Real **a = MATRIX(m->logA), **al, **be;
  Real like;

  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
  HMM_initGlobals(m->uu, tt);
  al = MATRIX(g_alpha);
  be = MATRIX(g_beta);
  /* calculate alpha's */
  HMM_calcAlphas(m, logL);
  /* calculate beta's -
   * beta[u][tt] = 1
   * beta[u][t] = sum(v = 1 ... m->uu, a[u][v]*beta[v][t+1]*logL[v][t+1])
   */
  for (u = 0; u<m->uu; u++) be[u][tt-1] = 0.0;
  for (t = tt-2; t>=0; t--)
    { for (u = 0; u<m->uu; u++)
	{ be[u][t] = NEGATIVE_INFINITY;
	  for (v = 0; v<m->uu; v++)
	    { be[u][t] =
		add_exp(be[u][t], a[u][v]+be[v][t+1]+VECTOR(logL[v])[t+1]);}}}
  /* calculate logL of sequence -
   * P(sequence|m) = sum(u = 1 ... m->uu, alpha[u][tt])
   */
  like = NEGATIVE_INFINITY;
  for (u = 0; u<m->uu; u++) like = add_exp(like, al[u][tt-1]);
  /* calculate responsibilities
   *               alpha[u][t]*beta[u][t]
   * gamma[u][t] = ----------------------
   *                    P(data|model)
   */
  for (t = 0; t<tt; t++)
    { for (u = 0; u<m->uu; u++) VECTOR(gamma[u])[t] = al[u][t]+be[u][t]-like;}}
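/* Not in the original source: a hedged sanity check one might run after
 * HMM_calcGammas, assuming the Hmm/RVec types and the VECTOR/VLENGTH macros
 * behave as they are used above, that gamma is still in log space at this
 * point, and that <assert.h> and <math.h> are included.  Because
 * gamma[u][t] = alpha[u][t]+beta[u][t]-logP(data|model), summing
 * exp(gamma[u][t]) over the states u should give 1 at every frame t, so the
 * log of that sum should be close to 0.
 */
static void HMM_checkGammas(Hmm *m, RVec *gamma)
{ int t, u, tt = VLENGTH(gamma[0]);
  for (t = 0; t<tt; t++)
    { Real logSum = NEGATIVE_INFINITY;
      for (u = 0; u<m->uu; u++) logSum = add_exp(logSum, VECTOR(gamma[u])[t]);
      assert(fabs(logSum)<1e-6);}}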
/* HMM_updateModel -
 *  Given an HMM and a vector of log likelihoods for states in the sequences,
 *  calculates the responsibilities of each state in the HMM for each symbol
 *  in the sequences, and accumulates into new_m the statistics needed to
 *  re-estimate the model parameters from the assigned log likelihoods.
 */
Real HMM_updateModel(Hmm *m, Hmm *new_m, RVec *logL, RVec *gamma, Real log_D,
		     Real postpC, int c, int c_ls,
		     enum training_mode training_mode) {
  int t, u, v, tt = VLENGTH(logL[0]);
  Real **a = MATRIX(m->logA), *b = VECTOR(m->logB), **al, **be, ***ps;
  Real logD = 0, like, dtf;
  int Sc = (c==c_ls);
  switch (training_mode) {
  case HMM_ML:
    assert(postpC==0.0);
    logD = NEGATIVE_INFINITY;
    break;
  case HMM_DT:
    assert(c>=0&&c_ls>=0);
    logD = log_D;
    break;
  default: panic("unrecognized training mode");
  }
  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
  HMM_initGlobals(m->uu, tt);
  al = MATRIX(g_alpha);
  be = MATRIX(g_beta);
  ps = MATRIX(g_psi);
  /* calculate alpha's */
  HMM_calcAlphas(m, logL);
  /* calculate beta's -
   * beta[u][tt] = 1
   * beta[u][t] = sum(v = 1 ... m->uu, a[u][v]*beta[v][t+1]*logL[v][t+1])
   */
  for (u = 0; u<m->uu; u++) be[u][tt-1] = 0.0;
  for (t = tt-2; t>=0; t--)
  { for (u = 0; u<m->uu; u++)
    { be[u][t] = NEGATIVE_INFINITY;
      for (v = 0; v<m->uu; v++)
      { be[u][t] =
	add_exp(be[u][t], a[u][v]+be[v][t+1]+VECTOR(logL[v])[t+1]);}}}

  /* calculate logL of sequence -
   * P(sequence|m) = sum(u = 1 ... m->uu, alpha[u][tt])
   */
  like = NEGATIVE_INFINITY;
  for (u = 0; u<m->uu; u++)
    like = add_exp(like, al[u][tt-1]);

  /* A sample that can NEVER belong to this category */
  if(like == NEGATIVE_INFINITY){
    assert(postpC == 0.0);
    assert(Sc==0);
  }

  /* calculate responsibilities
   *               alpha[u][t]*beta[u][t]
   * gamma[u][t] = ----------------------
   *                    P(data|model)
   */
  for (t = 0; t<tt; t++){
     for (u = 0; u<m->uu; u++){
       if(like!=NEGATIVE_INFINITY)
	 VECTOR(gamma[u])[t] = al[u][t]+be[u][t]-like;
       else
	 VECTOR(gamma[u])[t] = NEGATIVE_INFINITY;
     }
  }
  /* calculate time-indexed transition probabilities
   *                alpha[u][t]*a[u][v]*logL[v][t+1]*beta[v][t+1]
   * psi[u][v][t] = ---------------------------------------------
   *                               P(data|model)
   */
  for (u = 0; u<m->uu; u++){
    for (v = 0; v<m->uu; v++){
      for (t = 0; t<tt-1; t++){
	if(like!=NEGATIVE_INFINITY)
	  ps[u][v][t] = al[u][t]+a[u][v]+VECTOR(logL[v])[t+1]+be[v][t+1]-like;
	else
	  ps[u][v][t] = NEGATIVE_INFINITY;
      }
    }
  }
  /* Accumulate into the new model.  It may already hold partial updates
     from other training samples. */
  a = MATRIX(new_m->logA);
  b = VECTOR(new_m->logB);
  /* calculate B
   *   b[u] = gamma[u][1]
   * - added scaling by the sum of gammas to catch any numerical-accuracy
   *   problems
   * - not in log space here
   */
  for (u = 0; u<m->uu; u++) {
    /* This may be negative */
    b[u] += (Sc-postpC)*my_exp(VECTOR(gamma[u])[0])
      +my_exp(logD+VECTOR(m->logB)[u]);
  }
  /* calculate A matrix
   *                    sum(t = 1 ... tt-1, psi[u][v][t])
   * a[u][v] = -------------------------------------------------------
   *           sum(t = 1 ... tt-1, sum(w = 1 ... m->uu, psi[u][w][t]))
   * see note above about log space
   */
  for (u = 0; u<m->uu; u++) {
    for (v = 0; v<m->uu; v++) {
      /* This may be negative */
      dtf = 0.0;
      for(t = 0; t<tt-1; t++)
	dtf += my_exp(ps[u][v][t])*(Sc-postpC) + my_exp(logD+MATRIX(m->logA)[u][v]);
      a[u][v] += dtf;
    }
  }
  for (t = 0; t<tt; t++) {
    for (u = 0; u<m->uu; u++) VECTOR(gamma[u])[t] = my_exp(VECTOR(gamma[u])[t]);
  }
  return like;
}
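/* Not part of the original: a hedged sketch of the normalisation pass that
 * would follow the accumulation above, once every training sample has been
 * folded into new_m.  It assumes the counts written into new_m->logA and
 * new_m->logB are in linear (not log) space, as the comments above indicate,
 * and that MATRIX/VECTOR and the Hmm fields behave as used above.  The
 * helper my_log (the inverse of my_exp) is a hypothetical name; real code
 * would also need to guard against zero or negative accumulated counts,
 * which the discriminative (HMM_DT) updates can produce.
 */
static void HMM_normaliseModel_sketch(Hmm *new_m)
{ int u, v;
  Real sum, **a = MATRIX(new_m->logA), *b = VECTOR(new_m->logB);
  /* initial-state distribution: b[u] <- log(b[u]/sum(w, b[w])) */
  sum = 0.0;
  for (u = 0; u<new_m->uu; u++) sum += b[u];
  for (u = 0; u<new_m->uu; u++) b[u] = my_log(b[u]/sum);
  /* transition matrix: each row of a is renormalised the same way */
  for (u = 0; u<new_m->uu; u++)
    { sum = 0.0;
      for (v = 0; v<new_m->uu; v++) sum += a[u][v];
      for (v = 0; v<new_m->uu; v++) a[u][v] = my_log(a[u][v]/sum);}}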