/* -- accessor functions -- */
void removeStates(Model *m, int *xu, Hmm *hmm)
{
  int v, u, w, old_uu = m->uu;
  Hmm *phmm = m->hmm;
  for(u = 0; u < m->uu; ){
    if(xu[u]){
      if(u < m->uu - 1){
	for(v = u; v < m->uu - 1; v ++){
	  xu[v] = xu[v+1];
	  copyFFM(m->ffm[v], m->ffm[v+1]);
	  set_model_b(m, v, my_exp(VECTOR(phmm->logB)[v+1]));
	  VECTOR(hmm->logB)[v] = VECTOR(hmm->logB)[v+1];
	  copyRVec(MATRIX(phmm->logA)[v], MATRIX(phmm->logA)[v+1], m->uu);
	  copyRVec(MATRIX(hmm->logA)[v], MATRIX(hmm->logA)[v+1], m->uu);
	  for(w = 0; w < m->uu; w ++){
	    set_model_a(m, w, v, my_exp(MATRIX(phmm->logA)[w][v+1]));
	    MATRIX(hmm->logA)[w][v] = MATRIX(hmm->logA)[w][v+1];
	  }
	}
      }
      m->uu --;
      phmm->uu --;
      hmm->uu --;
    }
    else u ++;
  }
  assert(m->uu > 0);

  /* Clean up the redundant memory. Do NOT free the hmm matrices here; they are
     released according to the size of the underlying RMat rather than hmm->uu. */
  for(u = m->uu; u < old_uu; u ++)
    freeFFM(m->ffm[u]);
}
/* displayHMM - Writes the HMM `m' in a human-readable format to output
 *  stream `f'.  Only displays a maximum of DISPLAY_HMM_MAX_U states
 *  on a single line.
 */
void displayHMM(FILE *f, Hmm *m)
{ int i, u;
  /* display B vector */
  fprintf(f, "%8s", "");
  for (u = 0; u<DISPLAY_HMM_MAX_U&&u<m->uu; u++) fprintf(f, " %4d", u);
  fprintf(f, "\n");
  fprintf(f, "HMM:  B:");
  for (u = 0; u<DISPLAY_HMM_MAX_U&&u<m->uu; u++)
  { fprintf(f, " %4.2f", my_exp(VECTOR(m->logB)[u]));}
  fprintf(f, "\n");
  for (;u<m->uu; u += i)
  { fprintf(f, "\n");
    fprintf(f, "%8s", "");
    for (i = 0; i<DISPLAY_HMM_MAX_U&&u+i<m->uu; i++) fprintf(f, " %4d", u+i);
    fprintf(f, "\n");
    fprintf(f, "%8s", "");
    for (i = 0; i<DISPLAY_HMM_MAX_U&&u+i<m->uu; i++)
    { fprintf(f, " %4.2f", my_exp(VECTOR(m->logB)[u+i]));}
    fprintf(f, "\n");}
  /* display A matrix */
  fprintf(f, "%8s", "A:");
  for (u = 0; u<DISPLAY_HMM_MAX_U&&u<m->uu; u++) fprintf(f, " %4d", u);
  fprintf(f, "\n");
  for (u = 0; u<DISPLAY_HMM_MAX_U&&u<m->uu; u++)
  { fprintf(f, "%8d", u);
    for (i = 0; i<DISPLAY_HMM_MAX_U&&i<m->uu; i++)
    { fprintf(f, " %4.2f", my_exp(MATRIX(m->logA)[u][i]));}
    fprintf(f, "\n");}}
void CContinuousActionSigmoidPolicy::getGradient(CStateCollection *inputState, int outputDimension, CFeatureList *gradientFeatures)
{
	policy->getNextContinuousAction(inputState, contData);
	policy->getGradient(inputState, outputDimension, gradientFeatures);

	double min = contAction->getContinuousActionProperties()->getMinActionValue(outputDimension);
	double width = contAction->getContinuousActionProperties()->getMaxActionValue(outputDimension) - min;
	
	DebugPrint('p', "Sigmoid Gradient Calculation: Action Value %f\n", contData->getActionValue(outputDimension));
	contData->element(outputDimension) = - 2 + (contData->element(outputDimension) - min) / width * 4;
	
	double dSig = 1 / pow(1 + my_exp(- contData->element(outputDimension)), 2) * my_exp(- contData->element(outputDimension));

	if (fabs(dSig) > 10000000)
	{
		printf("Infintity gradient!! : %f, %f, %f,%f \n", dSig, contData->element(outputDimension), min,width);
		assert(false);
	}

	if (DebugIsEnabled('p'))
	{
		DebugPrint('p', "ContinuousActionPolicyGradient: ");
		gradientFeatures->saveASCII(DebugGetFileHandle('p'));
		DebugPrint('p', "\nSaturationFactor; %f\n", 4* dSig);
	}

	gradientFeatures->multFactor(4 * dSig);
}
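/* Illustrative check of the derivative used in getGradient() above (plain C
   sketch; exp() stands in for my_exp()): the sigmoid derivative
   e^{-x} / (1 + e^{-x})^2 equals sigmoid(x) * (1 - sigmoid(x)), so dSig can
   never exceed 0.25. */
#include <math.h>
static double sketch_sigmoid(double x)       { return 1.0 / (1.0 + exp(-x)); }
static double sketch_sigmoid_deriv(double x) { double s = sketch_sigmoid(x); return s * (1.0 - s); }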
/* log2linearHMM - Converts the B vector and A matrix of the HMM `m' from log
 *  space back to the original (linear) space, in place.
 */
void log2linearHMM(Hmm *m){
  int u, v;
  for (u = 0; u < m->uu; u ++){
    VECTOR(m->logB)[u] = my_exp(VECTOR(m->logB)[u]);
    for (v = 0; v < m->uu; v ++)
      MATRIX(m->logA)[u][v] = my_exp(MATRIX(m->logA)[u][v]);
  }
}
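/* Sketch of the inverse conversion (hypothetical helper, not part of the
   original API): maps the linear-space values written by log2linearHMM back
   into log space, assuming my_log() is the inverse of my_exp() as it is used
   in normaliseHMMlinear() below. */
void linear2logHMM(Hmm *m){
  int u, v;
  for (u = 0; u < m->uu; u ++){
    VECTOR(m->logB)[u] = my_log(VECTOR(m->logB)[u]);
    for (v = 0; v < m->uu; v ++)
      MATRIX(m->logA)[u][v] = my_log(MATRIX(m->logA)[u][v]);
  }
}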
void CContinuousActionSigmoidPolicy::getNextContinuousAction(CStateCollection *state, CContinuousActionData *action)
{
	policy->getNextContinuousAction(state, action);

	if (DebugIsEnabled('p'))
	{
		DebugPrint('p', "Sigmoid Policy, Action Values:");
		action->saveASCII(DebugGetFileHandle('p'));
		DebugPrint('p', "\n");
	}
	
	
	noise->initData(0.0);
	
	if (randomController && this->randomControllerMode == INTERN_RANDOM_CONTROLLER)
	{
		randomController->getNextContinuousAction(state, noise);
	}

	if (DebugIsEnabled('p'))
	{
		DebugPrint('p', "Sigmoid Policy, Noise Values:");
		noise->saveASCII(DebugGetFileHandle('p'));
		DebugPrint('p', "\n");
	}

	(*action) << *action + *noise;


	for (int i = 0; i < action->nrows(); i ++)
	{
		double min = contAction->getContinuousActionProperties()->getMinActionValue(i);
		double width = contAction->getContinuousActionProperties()->getMaxActionValue(i) - min;



		action->element(i) = - 2 + (action->element(i) - min) / width * 4;

		action->element(i) = min + width * (1.0 / (1.0 + my_exp(-action->element(i))));
	}
}
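/* Illustrative sketch of the per-dimension squashing performed above (plain C;
   exp() stands in for my_exp()): the noisy action is mapped from
   [min, min + width] onto [-2, 2], passed through a sigmoid, and rescaled back
   into [min, min + width], which keeps out-of-range actions bounded. */
#include <math.h>
static double sketch_squash_action(double a, double min, double width)
{ double z = -2.0 + (a - min) / width * 4.0;       /* [min, min+width] -> [-2, 2] */
  return min + width * (1.0 / (1.0 + exp(-z))); }  /* sigmoid back into range */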
double CRBFFeatureCalculator::getFeatureFactor(CState *state, ColumnVector *position)
{
	double exponent = 0.0;
	
//	printf("position : ");
//	position->saveASCII(stdout);

	for (unsigned int i = 0; i < numDim; i++)
	{
		double difference = fabs(state->getNormalizedContinuousState(dimensions[i]) - position->element(i));

		if (state->getStateProperties()->getPeriodicity(dimensions[i]) && difference > 0.5)
		{
			difference = 1 - difference;
		}

		exponent += pow(difference / (sigma[i] * gridScale[i]), 2) / 2;
//		printf("%d: %f\n", i, difference);
	}
//	printf(" Factor %f, exponent %f, %f \n", my_exp(- exponent), exponent, sigma[0]);

	return my_exp(- exponent);
}
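/* Standalone sketch of the activation computed above (plain C; exp() stands in
   for my_exp(), and the gridScale factor is folded into sigma): a product of
   per-dimension Gaussians with wrap-around handling for periodic, normalised
   dimensions. All names here are illustrative. */
#include <math.h>
static double sketch_rbf_factor(const double *state, const double *center,
                                const double *sigma, const int *periodic, int n)
{ double exponent = 0.0; int i;
  for (i = 0; i < n; i++)
  { double d = fabs(state[i] - center[i]);
    if (periodic[i] && d > 0.5) d = 1.0 - d;       /* wrap on the unit interval */
    exponent += pow(d / sigma[i], 2) / 2.0; }
  return exp(-exponent); }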
Real model_b(Model *m, int u)
{ assert(m!=NULL&&m->hmm!=NULL);
  if (u<0||u>=m->uu) panic("model_b(): u = %d out of range [0, %d]", u, m->uu);
  return my_exp(VECTOR(m->hmm->logB)[u]);}
Real model_a(Model *m, int u, int v)
{ assert(m!=NULL&&m->hmm!=NULL);
  if (u<0||u>=m->uu||v<0||v>=m->uu)
  { panic("model_a(): u = %d or v = %d out of range [0, %d]", u, v, m->uu);}
  return my_exp(MATRIX(m->hmm->logA)[u][v]);}
int update_with_box_scores(Model **m, Hmm **tmp_hmm, RMat *data, Real **postpC, Real log_D,
	   int *c_ls, int ll, int cc, enum training_mode training_mode,
			   int upper_triangular, RMat *score_matrices) {
  /* tmp_hmm[c] is scratch space. It must have at least as many states as
     m[c]->hmm. */
  /* needs work: must call compute_posterior externally */
  int c, l, u;
  int *xu;
  Real** scores;
  for (c = 0; c<cc; c++) {
    if (g_lastData!=data||ll>g_ll||m[c]->uu>g_uu) {
      g_lastData = data;
      initGlobals(ll, m[c]->uu, data);
    }
    zeroHMMlinear(tmp_hmm[c]);
    for (l = 0; l<ll; l++) {
      scores = MATRIX(score_matrices[l]);
      switch (training_mode) {
      case HMM_ML:
	if (c!=c_ls[l]) continue;
	for (u = 0; u<m[c]->uu; u++) {
	  FFM_logL_with_box_scores(m[c]->ffm[u], g_logL[l][u], data[l],scores[u]);
	}
	HMM_updateModel(m[c]->hmm, tmp_hmm[c], g_logL[l], g_gamma[l], log_D,
			0.0, -1, -1, training_mode);
	break;
      case HMM_DT:
	for (u = 0; u<m[c]->uu; u++) {
	  FFM_logL_with_box_scores(m[c]->ffm[u], g_logL[l][u], data[l],scores[u]);
	}
	HMM_updateModel(m[c]->hmm, tmp_hmm[c], g_logL[l], g_gamma[l], log_D,
			my_exp(postpC[c][l]), c, c_ls[l], training_mode);
	break;
      default: panic("unrecognized training mode");
      }
    }

    xu = (int *)safe_malloc(sizeof(int) * m[c]->uu);
    if (!normaliseHMMlinear(tmp_hmm[c], upper_triangular, training_mode, xu)) {
      assert(training_mode == HMM_DT);
      free(xu);
      return FALSE;
    }

    /*if  !normaliseHMMlinear(tmp_hmm[c], upper_triangular)) {
        assert(training_mode == HMM_DT);
       
      return FALSE;
      }*/

    copyHMM(m[c]->hmm, tmp_hmm[c]);
    for (u = 0; u<m[c]->uu; u++) {
      switch (training_mode) {
      case HMM_ML:
	assert(FFM_maximise(m[c]->ffm[u], data, g_gamma_r[u], ll, log_D,
			    NULL, c, c_ls));
	break;
      case HMM_DT:
	if (!FFM_maximise(m[c]->ffm[u], data, g_gamma_r[u], ll, log_D,
			  postpC[c], c, c_ls)){
	  free(xu);
	  return FALSE;
	}
	break;
      default: panic("unrecognized training mode");
      }
    }
 
    free(xu);
  }

  return TRUE;
}
/* update - Performs a single update on the HMM model for the given data. */
int update(Model **m, Hmm **tmp_hmm, RMat *data, Real **postpC, Real log_D,
	   int *c_ls, int ll, int cc, enum training_mode training_mode,
	   int upper_triangular) {
  /* tmp_hmm[c] is scratch space. It must have at least as many states as
     m[c]->hmm. */
  int c, l, u;
  int *xu;
  for (c = 0; c<cc; c++) {
    if (g_lastData!=data||ll>g_ll||m[c]->uu>g_uu) {
      g_lastData = data;
      initGlobals(ll, m[c]->uu, data);
    }
    zeroHMMlinear(tmp_hmm[c]);
    for (l = 0; l<ll; l++) {
      switch (training_mode) {
      case HMM_ML:
	if (c!=c_ls[l]) continue;
	for (u = 0; u<m[c]->uu; u++) {
	  FFM_logL(m[c]->ffm[u], g_logL[l][u], data[l]);
	}
	HMM_updateModel(m[c]->hmm, tmp_hmm[c], g_logL[l], g_gamma[l], log_D,
			0.0, -1, -1, training_mode);
	break;
      case HMM_DT:
	for (u = 0; u<m[c]->uu; u++) {
	  FFM_logL(m[c]->ffm[u], g_logL[l][u], data[l]);
	  //if(g_logL[l][u][tt-1] != NEGATIVE_INFINITY) flag = 1;
	}
	HMM_updateModel(m[c]->hmm, tmp_hmm[c], g_logL[l], g_gamma[l], log_D,
			my_exp(postpC[c][l]), c, c_ls[l], training_mode);
	break;
      default: panic("unrecognized training mode");
      }
    }
    
    xu = (int *)safe_malloc(sizeof(int) * m[c]->uu);
    if (!normaliseHMMlinear(tmp_hmm[c], upper_triangular, training_mode, xu)) {
      assert(training_mode == HMM_DT);
      free(xu);
      return FALSE;
    }
    
    copyHMM(m[c]->hmm, tmp_hmm[c]);
    
    for (u = 0; u<m[c]->uu; u++) {
      switch (training_mode) {
      case HMM_ML:
	assert(FFM_maximise(m[c]->ffm[u], data, g_gamma_r[u], ll, log_D,
			    NULL, c, c_ls));
	break;
      case HMM_DT:
	if (!FFM_maximise(m[c]->ffm[u], data, g_gamma_r[u], ll, log_D,
			  postpC[c], c, c_ls)){
	  free(xu);
	  return FALSE;
	}
	break;
      default: panic("unrecognized training mode");
      }
    }
    
    /* Remove redundant states by shifting the memory if necessary. */
    for(u = 0; u < m[c]->uu; u ++)
      if(xu[u]) break;
    if(u < m[c]->uu)
      removeStates(m[c], xu, tmp_hmm[c]);
       
    free(xu);
  }
  return TRUE;
}
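/* Hypothetical driver sketch for discriminative training, based only on the
   comment in normaliseHMMlinear() below that a FALSE return means log_D should
   be increased. Whether the partially updated models must be restored before
   retrying is not specified here, so this is illustrative only; `grow' is an
   assumed growth schedule. */
static void sketch_dt_update(Model **m, Hmm **tmp_hmm, RMat *data, Real **postpC,
                             Real *log_D, int *c_ls, int ll, int cc,
                             int upper_triangular)
{ Real grow = my_log(2.0);   /* double D on each failed attempt (assumption) */
  while (!update(m, tmp_hmm, data, postpC, *log_D, c_ls, ll, cc,
                 HMM_DT, upper_triangular))
    *log_D += grow; }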
static base_t my_expm1(const base_t &i)
{
  return my_exp(i) - 1.0;
}
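/* Numerical note (illustrative, not from the original code): for small |i|,
   my_exp(i) - 1.0 loses precision to cancellation; the C99 expm1() from
   <math.h> computes e^i - 1 accurately in that regime. `my_expm1_accurate' is
   a hypothetical alternative shown for comparison only. */
#include <math.h>
static double my_expm1_accurate(double i)
{ return expm1(i); }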
/* normaliseHMMlinear -
    Normalises an HMM in the original (linear) space so that the transition
    probabilities sum to 1, then converts all values from the original space
    to log space. This function should be paired with zeroHMMlinear. If some
    value in a numerator is negative while its sum is positive, it returns
    FALSE, meaning log_D should be increased; otherwise it returns TRUE.
 */
int normaliseHMMlinear(Hmm *m, int upper_triangular, enum training_mode train_mode, int *xu)
{ int u, v, w;
  Real sum, corrected_sum;
  Real delta_correction = 1e-20;
  int flag;
  sum = 0.0;
  for (u = 0; u<m->uu; u++){
    //printf("b0[%d]=%e ", u, VECTOR(m->logB)[u]);
    sum += VECTOR(m->logB)[u];
  }
  //printf("\n");
  assert(fabs(sum)>0.0);
  flag = 0;
  corrected_sum = NEGATIVE_INFINITY;
  for (u = 0; u<m->uu; u++) {
    if(VECTOR(m->logB)[u]/sum < 0.0){
      //printf("@@@ b[%d] = %0.10lf sum = %0.4lf\n", u, VECTOR(m->logB)[u], sum);
      return FALSE;
    }
    VECTOR(m->logB)[u] = my_log(VECTOR(m->logB)[u]/sum);
    /* Clip the value to zero if it is smaller than delta_correction.
       This jumps to another, hopefully better, region of the optimization
       space. */
    if(train_mode == HMM_DT && VECTOR(m->logB)[u] != NEGATIVE_INFINITY 
       && VECTOR(m->logB)[u] < my_log(delta_correction)){
      printf("b[%d]=%e is replaced by 0.0!\n", u, my_exp(VECTOR(m->logB)[u]));
      VECTOR(m->logB)[u] = NEGATIVE_INFINITY;
      flag = 1;
    }
    corrected_sum = add_exp(corrected_sum, VECTOR(m->logB)[u]);
  }
  /* Normalise the corrected array if necessary */
  if(flag){
    for(u = 0; u < m->uu; u ++)
      VECTOR(m->logB)[u] -= corrected_sum;
  }

  for (u = 0; u<m->uu; u++)
  { sum = 0.0;
    w = (upper_triangular?u:0);
    for (v = w; v<m->uu; v++) sum += MATRIX(m->logA)[u][v];
    if (sum==0.0) {
      /* This state never transitions to any state (including itself). That
	 does not make it unreachable: it may be a final state or a useless
	 state (an unreachable state eventually becomes a useless state).
	 We simply keep all of its outgoing transition probabilities at zero
	 and do NOTHING else here; we do not want a corrected, normalised
	 distribution for this row. */
      for(v = 0; v < m->uu; v ++)
	MATRIX(m->logA)[u][v] = NEGATIVE_INFINITY;
      continue;
    }
    flag = 0;
    corrected_sum = NEGATIVE_INFINITY;
    for (v = 0; v<m->uu; v ++) {
      if(MATRIX(m->logA)[u][v]/sum < 0.0){
	//printf("@@@ m->A[%d][%d] = %0.4lf   sum = %0.4lf\n", u, v, MATRIX(m->logA)[u][v], sum);
	return FALSE;
      }
      MATRIX(m->logA)[u][v] = my_log(MATRIX(m->logA)[u][v]/sum);
      /* Clip the value to zero if it is smaller than delta_correction.
	 This jumps to another, hopefully better, region of the optimization
	 space. */
      if(train_mode == HMM_DT && MATRIX(m->logA)[u][v] != NEGATIVE_INFINITY 
	 && MATRIX(m->logA)[u][v] < my_log(delta_correction)){
	//printf("a[%d][%d]=%e is replaced by 0.0!\n", u, v, my_exp(MATRIX(m->logA)[u][v]));
	MATRIX(m->logA)[u][v] = NEGATIVE_INFINITY;
	flag = 1;
      }
      corrected_sum = add_exp(corrected_sum, MATRIX(m->logA)[u][v]);
    }
    if(flag){
      for(v = 0; v < m->uu; v ++)
	MATRIX(m->logA)[u][v] -= corrected_sum;
    }
  }

  /* Check for unreachable states: a state whose logB entry is
     NEGATIVE_INFINITY and whose column in logA is all NEGATIVE_INFINITY can
     never be entered, so it is marked for removal. */
  memset(xu, 0, sizeof(int) * m->uu);
  for(u = 0; u < m->uu; u ++){
    flag = 0;
    if(VECTOR(m->logB)[u] == NEGATIVE_INFINITY) flag ++;
    for(v = 0; v < m->uu; v ++)
      if(MATRIX(m->logA)[v][u] != NEGATIVE_INFINITY) break;
    if(v == m->uu) flag ++;
    if(flag == 2){
      //printf("AN UNREACHABLE STATE IS TO BE REMOVED! State_#: %d\n", u);
      xu[u] = 1;
    }
  }
   
  return TRUE;
}
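/* Minimal standalone sketch of the per-row normalisation performed above
   (illustrative; plain log() stands in for my_log(), -HUGE_VAL for
   NEGATIVE_INFINITY). It shows the three cases: a dead row is left with no
   outgoing probability mass, a row with mixed signs makes the caller raise
   log_D, and an ordinary row is normalised and converted to log space. */
#include <math.h>
static int sketch_normalise_row(double *row, int n)
{ double sum = 0.0; int v;
  for (v = 0; v < n; v++) sum += row[v];
  if (sum == 0.0)                                  /* dead row: no outgoing mass */
  { for (v = 0; v < n; v++) row[v] = -HUGE_VAL;
    return 1; }
  for (v = 0; v < n; v++)
  { if (row[v] / sum < 0.0) return 0;              /* mixed signs: increase log_D */
    row[v] = log(row[v] / sum); }                  /* normalise, go to log space */
  return 1; }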
double CSingleStateRBFFeatureCalculator::getFeatureFactor(int , double difference, double diffNextPart)
{
	double distance = fabs(difference);

	return my_exp(- pow(distance / diffNextPart * 2, 2)) ;
}
/* HMM_updateModel -
 *  Given an HMM and a vector of log likelihoods for states in the sequences,
 *  calculates the responsibilities of each state in the HMM for each symbol
 *  in the sequences, and maximises the model parameters based on the
 *  assigned log likelihoods.
 */
Real HMM_updateModel(Hmm *m, Hmm *new_m, RVec *logL, RVec *gamma, Real log_D,
		     Real postpC, int c, int c_ls,
		     enum training_mode training_mode) {
  int t, u, v, tt = VLENGTH(logL[0]);
  Real **a = MATRIX(m->logA), *b = VECTOR(m->logB), **al, **be, ***ps;
  Real logD = 0, like, dtf;
  int Sc = (c==c_ls);
  switch (training_mode) {
  case HMM_ML:
    assert(postpC==0.0);
    logD = NEGATIVE_INFINITY;
    break;
  case HMM_DT:
    assert(c>=0&&c_ls>=0);
    logD = log_D;
    break;
  default: panic("unrecognized training mode");
  }
  assert(VLENGTH(logL[0])==VLENGTH(gamma[0]));
  HMM_initGlobals(m->uu, tt);
  al = MATRIX(g_alpha);
  be = MATRIX(g_beta);
  ps = MATRIX(g_psi);
  /* calculate alpha's */
  HMM_calcAlphas(m, logL);
  /* calculate beta's -
   * beta[u][tt] = 1
   * beta[u][t] = sum(v = 1 ... m->uu, a[u][v]*beta[v][t+1]*logL[v][t+1])
   */
  for (u = 0; u<m->uu; u++) be[u][tt-1] = 0.0;
  for (t = tt-2; t>=0; t--)
  { for (u = 0; u<m->uu; u++)
    { be[u][t] = NEGATIVE_INFINITY;
      for (v = 0; v<m->uu; v++)
      { be[u][t] =
	add_exp(be[u][t], a[u][v]+be[v][t+1]+VECTOR(logL[v])[t+1]);}}}

  /* calculate logL of sequence -
   * P(sequence|m) = sum(u = 1 ... m->uu, alpha[u][tt])
   */
  like = NEGATIVE_INFINITY;
  for (u = 0; u<m->uu; u++)
    like = add_exp(like, al[u][tt-1]);

  /* A sample that can NEVER belong to this category */
  if(like == NEGATIVE_INFINITY){
    assert(postpC == 0.0);
    assert(Sc==0);
  }

  /* calculate responsibilities
   *               alpha[u][t]*beta[u][t]
   * gamma[u][t] = ----------------------
   *                    P(data|model)
   */
  for (t = 0; t<tt; t++){
     for (u = 0; u<m->uu; u++){
       if(like!=NEGATIVE_INFINITY)
	 VECTOR(gamma[u])[t] = al[u][t]+be[u][t]-like;
       else
	 VECTOR(gamma[u])[t] = NEGATIVE_INFINITY;
     }
  }
  /* calculate time-indexed transition probabilities
   *                alpha[u][t]*a[u][v]*logL[v][t+1]*beta[v][t+1]
   * psi[u][v][t] = ---------------------------------------------
   *                               P(data|model)
   */
  for (u = 0; u<m->uu; u++){
    for (v = 0; v<m->uu; v++){
      for (t = 0; t<tt-1; t++){
	if(like!=NEGATIVE_INFINITY)
	  ps[u][v][t] = al[u][t]+a[u][v]+VECTOR(logL[v])[t+1]+be[v][t+1]-like;
	else
	  ps[u][v][t] = NEGATIVE_INFINITY;
      }
    }
  }
  /* Update the new model. It may already have been partially updated by other
     training samples. */
  a = MATRIX(new_m->logA);
  b = VECTOR(new_m->logB);
  /* calculate B
     b[u] = gamma[u][1]
     - scaling by the sum of gammas was added to catch numerical accuracy
       problems
     - note: these accumulations are not in log space
   */
  for (u = 0; u<m->uu; u++) {
    /* This may be negative */
    b[u] += (Sc-postpC)*my_exp(VECTOR(gamma[u])[0])
      +my_exp(logD+VECTOR(m->logB)[u]);
  }
  /* calculate A matrix
   *                    sum(t = 1 ... tt-1, psi[u][v][t])
   * a[u][v] = -------------------------------------------------------
   *           sum(t = 1 ... tt-1, sum(w = 1 ... m->uu, psi[u][w][t]))
   * see note above about log space
   */
  for (u = 0; u<m->uu; u++) {
    for (v = 0; v<m->uu; v++) {
      /* This may be negative */
      dtf = 0.0;
      for(t = 0; t<tt-1; t++)
	dtf += my_exp(ps[u][v][t])*(Sc-postpC) + my_exp(logD+MATRIX(m->logA)[u][v]);
      a[u][v] += dtf;
    }
  }
  for (t = 0; t<tt; t++) {
    for (u = 0; u<m->uu; u++) VECTOR(gamma[u])[t] = my_exp(VECTOR(gamma[u])[t]);
  }
  return like;
}
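/* Illustrative sketch of the forward pass invoked above (HMM_calcAlphas is not
   shown in this listing). It uses the same globals and helpers as the beta
   recursion, so treat it as an assumed implementation, not the original:
     alpha[u][0] = b[u] + logL[u][0]
     alpha[u][t] = logL[u][t] + logsumexp over v of (alpha[v][t-1] + a[v][u]) */
static void sketch_calcAlphas(Hmm *m, RVec *logL)
{ int t, u, v, tt = VLENGTH(logL[0]);
  Real **al = MATRIX(g_alpha), **a = MATRIX(m->logA), *b = VECTOR(m->logB);
  for (u = 0; u<m->uu; u++) al[u][0] = b[u]+VECTOR(logL[u])[0];
  for (t = 1; t<tt; t++)
  { for (u = 0; u<m->uu; u++)
    { al[u][t] = NEGATIVE_INFINITY;
      for (v = 0; v<m->uu; v++)
	al[u][t] = add_exp(al[u][t], al[v][t-1]+a[v][u]);
      al[u][t] += VECTOR(logL[u])[t];}}}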