Code example #1
0
File: hmm.c  Project: LinaW/sentence-training
/* HMM_calcGammas -
 *  Fills gamma with the log state-occupancy probabilities: for every frame t
 *  and state u, gamma[u][t] = alpha[u][t]*beta[u][t]/P(data|model), computed
 *  entirely in log space via add_exp.
 */
void HMM_calcGammas(Hmm *m, RVec *logL, RVec *gamma) {
  int tt = VLENGTH(logL[0]);
  Real **logA = MATRIX(m->logA);
  Real **alpha, **beta, logLike;
  int t, u, v;

  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
  HMM_initGlobals(m->uu, tt);
  alpha = MATRIX(g_alpha);
  beta = MATRIX(g_beta);
  /* forward pass fills g_alpha */
  HMM_calcAlphas(m, logL);
  /* backward pass:
   *   beta[u][tt-1] = log(1) = 0
   *   beta[u][t]    = logsum_v( a[u][v] + beta[v][t+1] + logL[v][t+1] )
   */
  for (u = 0; u < m->uu; u++) beta[u][tt-1] = 0.0;
  for (t = tt-2; t >= 0; t--) {
    for (u = 0; u < m->uu; u++) {
      Real acc = NEGATIVE_INFINITY;
      for (v = 0; v < m->uu; v++)
        acc = add_exp(acc, logA[u][v]+beta[v][t+1]+VECTOR(logL[v])[t+1]);
      beta[u][t] = acc;
    }
  }
  /* log P(sequence|m) = logsum_u alpha[u][tt-1] */
  logLike = NEGATIVE_INFINITY;
  for (u = 0; u < m->uu; u++) logLike = add_exp(logLike, alpha[u][tt-1]);
  /* responsibilities, still in log space */
  for (t = 0; t < tt; t++) {
    for (u = 0; u < m->uu; u++)
      VECTOR(gamma[u])[t] = alpha[u][t]+beta[u][t]-logLike;
  }
}
Code example #2
0
File: vector.c  Project: qyqx/wisp
/* vector_concat -
 * Build a new vector containing all elements of a followed by all elements
 * of b.  Each copied element receives an extra reference via UPREF. */
object_t *vector_concat (object_t * a, object_t * b)
{
  size_t alen = VLENGTH (a);
  size_t blen = VLENGTH (b);
  object_t *out = c_vec (alen + blen, NIL);
  size_t i = 0;
  /* front half comes from a */
  while (i < alen)
    {
      vset (out, i, UPREF (vget (a, i)));
      i++;
    }
  /* back half comes from b */
  while (i < alen + blen)
    {
      vset (out, i, UPREF (vget (b, i - alen)));
      i++;
    }
  return out;
}
Code example #3
0
/* HMM_logL -
 *  Log likelihood of the observation sequence under the HMM:
 *  log P(sequence|m) = logsum_u alpha[u][tt-1], using the forward pass.
 */
Real HMM_logL(Hmm *m, RVec *logL)
{ int tt = VLENGTH(logL[0]);
  Real total = NEGATIVE_INFINITY;
  int u;
  HMM_calcAlphas(m, logL);
  for (u = 0; u < m->uu; u++)
    total = add_exp(total, MATRIX(g_alpha)[u][tt-1]);
  return total;}
Code example #4
0
File: hmm.c  Project: LinaW/sentence-training
/* HMM_best_state_sequence -
 *  Viterbi decode: given an HMM and per-state log likelihoods for each frame,
 *  returns a malloc'd array of length tt holding the most likely state index
 *  for every frame.  Caller owns (and frees) the returned array.
 */
int *HMM_best_state_sequence(Hmm *m, RVec *logL)
{ int tt = VLENGTH(logL[0]);
  int *path = safe_malloc(tt * sizeof *path);
  Real best;
  int t, u;
  HMM_calcDeltas(m, logL);
  /* pick the most likely final state */
  best = NEGATIVE_INFINITY;
  path[tt-1] = 0;
  for (u = 0; u < m->uu; u++)
  { Real cand = MATRIX(g_delta)[u][tt-1];
    if (cand > best)
    { best = cand;
      path[tt-1] = u;}}
  /* follow the stored back-pointers from the end to the start */
  for (t = tt-2; t >= 0; t--) path[t] = MATRIX(g_deltav)[path[t+1]][t+1];
  return path;}
Code example #5
0
File: hmm.c  Project: LinaW/sentence-training
/* HMM_calcAlphas -
 *  Forward pass of the forward-backward algorithm, in log space.
 *  Fills the global g_alpha with
 *    alpha[u][0] = b[u] + logL[u][0]
 *    alpha[u][t] = logsum_v( a[v][u] + alpha[v][t-1] ) + logL[u][t]
 */
void HMM_calcAlphas(Hmm *m, RVec *logL)
{ int tt = VLENGTH(logL[0]);
  Real **alpha, **logA = MATRIX(m->logA), *logB = VECTOR(m->logB);
  int t, u, v;
  HMM_initGlobals(m->uu, tt);
  alpha = MATRIX(g_alpha);
  /* base case: initial state distribution times first observation */
  for (u = 0; u < m->uu; u++)
    alpha[u][0] = VECTOR(logL[u])[0]+logB[u];
  /* induction over frames */
  for (t = 1; t < tt; t++)
  { for (u = 0; u < m->uu; u++)
    { Real acc = NEGATIVE_INFINITY;
      for (v = 0; v < m->uu; v++)
        acc = add_exp(acc, logA[v][u]+alpha[v][t-1]);
      alpha[u][t] = acc+VECTOR(logL[u])[t];
    }
  }
}
Code example #6
0
/* HMM_calcDeltas -
 *  Viterbi recursion in log space.  Fills the global g_delta with the best
 *  path scores and g_deltav with the argmax back-pointers:
 *    delta[u][0] = b[u] + logL[u][0]
 *    delta[u][t] = max_v( a[v][u] + delta[v][t-1] ) + logL[u][t]
 */
void HMM_calcDeltas(Hmm *m, RVec *logL)
{ int tt = VLENGTH(logL[0]);
  /* back-pointers hold state indices even though the matrix is Real-typed */
  Real **delta, **backptr, **logA = MATRIX(m->logA), *logB = VECTOR(m->logB);
  int t, u, v;
  HMM_initGlobals(m->uu, tt);
  delta = MATRIX(g_delta);
  backptr = MATRIX(g_deltav);
  /* base case */
  for (u = 0; u < m->uu; u++) delta[u][0] = VECTOR(logL[u])[0]+logB[u];
  /* induction: track the best predecessor for each (u, t) */
  for (t = 1; t < tt; t++)
  { for (u = 0; u < m->uu; u++)
    { Real best = NEGATIVE_INFINITY;
      Real argbest = 0;
      for (v = 0; v < m->uu; v++)
      { Real cand = logA[v][u]+delta[v][t-1];
        if (cand > best)
        { best = cand;
          argbest = v;}}
      delta[u][t] = best+VECTOR(logL[u])[t];
      backptr[u][t] = argbest;}}}
Code example #7
0
   * gamma[u][t] = ----------------------
   *                    P(data|model)
   */
  for (t = 0; t<tt; t++)
    { for (u = 0; u<m->uu; u++) VECTOR(gamma[u])[t] = al[u][t]+be[u][t]-like;}}

/* HMM_updateModel -
 *  Given an HMM and a vector of log likelihoods for states in the sequences,
 *  calculates the responsibilities of each state in the HMM for each symbol
 *  in the sequences, and maximises the model parameters based on the
 *  assigned log likelihoods.
*/
Real HMM_updateModel(Hmm *m, Hmm *new, RVec *logL, RVec *gamma, Real log_D, 
		     Real postpC, int c, int c_ls,
		     enum training_mode training_mode) {
  int t, u, v, tt = VLENGTH(logL[0]);
  Real **a = MATRIX(m->logA), *b = VECTOR(m->logB), **al, **be, ***ps;
  Real logD, like, dtf;
  int Sc = (c==c_ls);
  switch (training_mode) {
  case HMM_ML:
    assert(postpC==0.0);	/* needs work: ask yu239 */
    logD = NEGATIVE_INFINITY;
    break;
  case HMM_DT:
    assert(c>=0&&c_ls>=0);
    logD = log_D;
    break;
  default: panic("unrecognized training mode");
  }
  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
Code example #8
0
File: g_range.c  Project: fedser/Plotutils
/* _set_line_join_bbox -
 * Update the bounding box in bufp to account for drawing a line join at
 * (x,y), where (xleft,yleft) and (xright,yright) are the far endpoints of
 * the two joining segments.  All point args are in user coordinates; m[6]
 * is the transformation matrix applied via XD_INTERNAL/YD_INTERNAL.
 * joinstyle selects miter/triangular/bevel/round handling; miterlimit
 * bounds how sharp a mitered spike may get before falling back to bevel. */
void
_set_line_join_bbox (plOutbuf *bufp, double xleft, double yleft, double x, double y, double xright, double yright, double linewidth, int joinstyle, double miterlimit, double m[6])
{
  plVector v1, v2, vsum;
  double v1len, v2len;
  double halfwidth;
  double mitrelen;

  switch (joinstyle)
    {
    case PL_JOIN_MITER:
    default:
      /* vectors from the join point toward each segment's far endpoint */
      v1.x = xleft - x;
      v1.y = yleft - y;
      v2.x = xright - x;
      v2.y = yright - y;
      v1len = VLENGTH(v1);
      v2len = VLENGTH(v2);
      if (v1len == 0.0 || v2len == 0.0)
	/* degenerate segment: only the join point itself contributes */
	_update_bbox (bufp, XD_INTERNAL(x,y,m), YD_INTERNAL(x,y,m));
      else
	{
	  double cosphi;
	  
	  /* The maximum value the cosine of the angle between two joining
	     lines may have, if the join is to be mitered rather than
	     beveled, is 1-2/(M*M), where M is the mitrelimit.  This is
	     because M equals the cosecant of one-half the minimum angle. */
	  cosphi = ((v1.x * v2.x + v1.y * v2.y) / v1len) / v2len;
	  if (miterlimit <= 1.0
	      || (cosphi > (1.0 - 2.0 / (miterlimit * miterlimit))))
	    /* bevel rather than miter */
	    {
	      _set_line_end_bbox (bufp, x, y, xleft, yleft, linewidth, PL_CAP_BUTT, m);
	      _set_line_end_bbox (bufp,x, y, xright, yright, linewidth, PL_CAP_BUTT, m);
	    }
	  else
	    {
	      /* mitered join: include the tip of the miter spike, which lies
		 along the (negated) bisector of the two segment directions */
	      mitrelen = sqrt (1.0 / (2.0 - 2.0 * cosphi)) * linewidth;
	      vsum.x = v1.x + v2.x;
	      vsum.y = v1.y + v2.y;
	      _vscale (&vsum, mitrelen);
	      x -= vsum.x;
	      y -= vsum.y;
	      _update_bbox (bufp, XD_INTERNAL(x,y,m), YD_INTERNAL(x,y,m));
	    }
	}
      break;
    case PL_JOIN_TRIANGULAR:
      /* add a miter vertex, and same vertices as when bevelling */
      v1.x = xleft - x;
      v1.y = yleft - y;
      v2.x = xright - x;
      v2.y = yright - y;
      vsum.x = v1.x + v2.x;
      vsum.y = v1.y + v2.y;
      _vscale (&vsum, 0.5 * linewidth);
      x -= vsum.x;
      y -= vsum.y;
      _update_bbox (bufp, XD_INTERNAL(x,y,m), YD_INTERNAL(x,y,m));
      /* restore (x,y) before falling through to the bevel vertices */
      x += vsum.x;
      y += vsum.y;
      /* fall through */
    case PL_JOIN_BEVEL:
      /* bevel: the two butt-capped segment ends bound the join */
      _set_line_end_bbox (bufp, x, y, xleft, yleft, linewidth, PL_CAP_BUTT, m);
      _set_line_end_bbox (bufp, x, y, xright, yright, linewidth, PL_CAP_BUTT, m);
      break;
    case PL_JOIN_ROUND:
      /* round: a disc of radius linewidth/2 centered on the join point */
      halfwidth = 0.5 * linewidth;
      _set_ellipse_bbox (bufp, x, y, halfwidth, halfwidth, 1.0, 0.0, 0.0, m);
      break;
    }
}
Code example #9
0
File: hmm.c  Project: LinaW/sentence-training
/* HMM_updateModel -
 *  Given an HMM and a vector of log likelihoods for states in the sequences,
 *  calculates the responsibilities of each state in the HMM for each symbol
 *  in the sequences, and accumulates (un-normalised) parameter updates into
 *  new_m based on the assigned log likelihoods.
 *
 *  m             - current model (read only)
 *  new_m         - accumulator model whose logB/logA receive the updates
 *  logL          - per-state log likelihoods for each frame of the sequence
 *  gamma         - out: responsibilities; converted from log space to
 *                  probability space before returning
 *  log_D         - damping constant used in discriminative training (HMM_DT)
 *  postpC        - posterior weight of this class for the sample
 *  c, c_ls       - NOTE(review): presumably this model's class index and the
 *                  sample's labelled class — confirm against callers
 *  training_mode - HMM_ML (maximum likelihood) or HMM_DT (discriminative)
 *
 *  Returns the log likelihood of the sequence under m.
*/
Real HMM_updateModel(Hmm *m, Hmm *new_m, RVec *logL, RVec *gamma, Real log_D,
		     Real postpC, int c, int c_ls,
		     enum training_mode training_mode) {
  int t, u, v, tt = VLENGTH(logL[0]);
  Real **a = MATRIX(m->logA), *b = VECTOR(m->logB), **al, **be, ***ps;
  Real logD = 0, like, dtf;
  /* Sc is 1 exactly when this model's class matches the sample's label */
  int Sc = (c==c_ls);
  switch (training_mode) {
  case HMM_ML:
    assert(postpC==0.0);
    /* exp(logD) == 0, so the damping terms below vanish under ML */
    logD = NEGATIVE_INFINITY;
    break;
  case HMM_DT:
    assert(c>=0&&c_ls>=0);
    logD = log_D;
    break;
  default: panic("unrecognized training mode");
  }
  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
  HMM_initGlobals(m->uu, tt);
  al = MATRIX(g_alpha);
  be = MATRIX(g_beta);
  ps = MATRIX(g_psi);
  /* calculate alpha's (forward pass fills g_alpha) */
  HMM_calcAlphas(m, logL);
  /* calculate beta's -
   * beta[u][tt] = 1
   * beta[u][t] = sum(v = 1 ... m->uu, a[u][v]*beta[v][t+1]*logL[v][t+1])
   */
  for (u = 0; u<m->uu; u++) be[u][tt-1] = 0.0;
  for (t = tt-2; t>=0; t--)
  { for (u = 0; u<m->uu; u++)
    { be[u][t] = NEGATIVE_INFINITY;
      for (v = 0; v<m->uu; v++)
      { be[u][t] =
	add_exp(be[u][t], a[u][v]+be[v][t+1]+VECTOR(logL[v])[t+1]);}}}

  /* calculate logL of sequence -
   * P(sequence|m) = sum(u = 1 ... m->uu, alpha[u][tt])
   */
  like = NEGATIVE_INFINITY;
  for (u = 0; u<m->uu; u++)
    like = add_exp(like, al[u][tt-1]);

  /* A sample that can NEVER belong to this category */
  if(like == NEGATIVE_INFINITY){
    assert(postpC == 0.0);
    assert(Sc==0);
  }

  /* calculate responsibilities
   *               alpha[u][t]*beta[u][t]
   * gamma[u][t] = ----------------------
   *                    P(data|model)
   * (zero-likelihood samples get log(0) responsibilities everywhere)
   */
  for (t = 0; t<tt; t++){
     for (u = 0; u<m->uu; u++){
       if(like!=NEGATIVE_INFINITY)
	 VECTOR(gamma[u])[t] = al[u][t]+be[u][t]-like;
       else
	 VECTOR(gamma[u])[t] = NEGATIVE_INFINITY;
     }
  }
  /* calculate time-indexed transition probabilities
   *                alpha[u][t]*a[u][v]*logL[v][t+1]*beta[v][t+1]
   * psi[u][v][t] = ---------------------------------------------
   *                               P(data|model)
   */
  for (u = 0; u<m->uu; u++){
    for (v = 0; v<m->uu; v++){
      for (t = 0; t<tt-1; t++){
	if(like!=NEGATIVE_INFINITY)
	  ps[u][v][t] = al[u][t]+a[u][v]+VECTOR(logL[v])[t+1]+be[v][t+1]-like;
	else
	  ps[u][v][t] = NEGATIVE_INFINITY;
      }
    }
  }
  /* Update new model. The model may have been partly updated by some training
     samples.  From here on a and b alias new_m's accumulators, not m's. */
  a = MATRIX(new_m->logA);
  b = VECTOR(new_m->logB);
  /* calculate B
     b[u] = gamma[u][1]
     - added scaling by sum of gammas to catch any numerical accuracy problems
     not log space here
   */
  for (u = 0; u<m->uu; u++) {
    /* This may be negative (Sc-postpC can be negative under DT) */
    b[u] += (Sc-postpC)*my_exp(VECTOR(gamma[u])[0])
      +my_exp(logD+VECTOR(m->logB)[u]);
  }
  /* calculate A matrix
   *                    sum(t = 1 ... tt-1, psi[u][v][t])
   * a[u][v] = -------------------------------------------------------
   *           sum(t = 1 ... tt-1, sum(w = 1 ... m->uu, psi[u][w][t]))
   * see note above about log space
   */
  for (u = 0; u<m->uu; u++) {
    for (v = 0; v<m->uu; v++) {
      /* This may be negative */
      dtf = 0.0;
      /* NOTE(review): the damping term my_exp(logD+logA[u][v]) is added once
	 per time step (i.e. scaled by tt-1 overall) — confirm this weighting
	 is intended rather than a single per-transition addition. */
      for(t = 0; t<tt-1; t++)
	dtf += my_exp(ps[u][v][t])*(Sc-postpC) + my_exp(logD+MATRIX(m->logA)[u][v]);
      a[u][v] += dtf;
    }
  }
  /* convert gamma from log space to probability space for the caller */
  for (t = 0; t<tt; t++) {
    for (u = 0; u<m->uu; u++) VECTOR(gamma[u])[t] = my_exp(VECTOR(gamma[u])[t]);
  }
  return like;
}