Example #1
// Runs the backward CTC pass, filling in log_probs.
void CTC::Backward(GENERIC_2D_ARRAY<double>* log_probs) const {
  log_probs->Resize(num_timesteps_, num_labels_, -MAX_FLOAT32);
  log_probs->put(num_timesteps_ - 1, num_labels_ - 1, 0.0);
  if (labels_[num_labels_ - 1] == null_char_)
    log_probs->put(num_timesteps_ - 1, num_labels_ - 2, 0.0);
  for (int t = num_timesteps_ - 2; t >= 0; --t) {
    const float* outputs_tp1 = outputs_[t + 1];
    for (int u = min_labels_[t]; u <= max_labels_[t]; ++u) {
      // Continuing the same label.
      double log_sum = log_probs->get(t + 1, u) + log(outputs_tp1[labels_[u]]);
      // Change from previous label.
      if (u + 1 < num_labels_) {
        double prev_prob = outputs_tp1[labels_[u + 1]];
        log_sum =
            LogSumExp(log_sum, log_probs->get(t + 1, u + 1) + log(prev_prob));
      }
      // Skip the null if allowed.
      if (u + 2 < num_labels_ && labels_[u + 1] == null_char_ &&
          labels_[u] != labels_[u + 2]) {
        double skip_prob = outputs_tp1[labels_[u + 2]];
        log_sum =
            LogSumExp(log_sum, log_probs->get(t + 1, u + 2) + log(skip_prob));
      }
      log_probs->put(t, u, log_sum);
    }
  }
}
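This pass (like the forward pass in the next example) accumulates path probabilities in log space through a two-argument LogSumExp that the snippet does not show. A minimal sketch of such a helper, as an assumption about its signature (Tesseract's own definition may differ):

#include <algorithm>
#include <cmath>
#include <limits>

// Computes log(exp(a) + exp(b)) without overflow by factoring out the
// larger argument.
double LogSumExp(double a, double b) {
  if (a < b) std::swap(a, b);  // Ensure a >= b.
  if (b == -std::numeric_limits<double>::infinity()) return a;
  return a + std::log1p(std::exp(b - a));
}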
Example #2
// Runs the forward CTC pass, filling in log_probs.
void CTC::Forward(GENERIC_2D_ARRAY<double>* log_probs) const {
  log_probs->Resize(num_timesteps_, num_labels_, -MAX_FLOAT32);
  log_probs->put(0, 0, log(outputs_(0, labels_[0])));
  if (labels_[0] == null_char_)
    log_probs->put(0, 1, log(outputs_(0, labels_[1])));
  for (int t = 1; t < num_timesteps_; ++t) {
    const float* outputs_t = outputs_[t];
    for (int u = min_labels_[t]; u <= max_labels_[t]; ++u) {
      // Continuing the same label.
      double log_sum = log_probs->get(t - 1, u);
      // Change from previous label.
      if (u > 0) {
        log_sum = LogSumExp(log_sum, log_probs->get(t - 1, u - 1));
      }
      // Skip the null if allowed.
      if (u >= 2 && labels_[u - 1] == null_char_ &&
          labels_[u] != labels_[u - 2]) {
        log_sum = LogSumExp(log_sum, log_probs->get(t - 1, u - 2));
      }
      // Add in the log prob of the current label.
      double label_prob = outputs_t[labels_[u]];
      log_sum += log(label_prob);
      log_probs->put(t, u, log_sum);
    }
  }
}
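Taken together, the two passes yield label posteriors: Forward folds the output probability at (t, u) into alpha, while Backward only accumulates outputs from t + 1 onward, so alpha(t, u) + beta(t, u) is the log probability of all complete paths through (t, u). A sketch of the per-timestep normalization (a hypothetical helper, not part of the library), reusing the two-argument LogSumExp sketched above:

#include <cmath>
#include <limits>
#include <vector>

// Converts one timestep's forward and backward log lattices into
// normalized label posteriors gamma(u) = exp(alpha + beta - log Z_t).
std::vector<double> LabelPosteriorsAtT(const std::vector<double>& alpha_t,
                                       const std::vector<double>& beta_t) {
  double log_z = -std::numeric_limits<double>::infinity();
  std::vector<double> gamma(alpha_t.size());
  for (size_t u = 0; u < alpha_t.size(); ++u) {
    gamma[u] = alpha_t[u] + beta_t[u];
    log_z = LogSumExp(log_z, gamma[u]);  // Accumulate the normalizer.
  }
  for (double& g : gamma) g = std::exp(g - log_z);
  return gamma;
}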
Example #3
	// Returns the log probability of proposing target among the internal
	// nodes of the subtree rooted at nd, under the same smoothed weights
	// used by the drawing form below.
	double TGMCMC::SampleSub(Node *nd, Node *target) const
	{
		if (nd->IsLeaf()) return 0;
		int pos = 0;
		std::vector<double> p;
		double max_log_d = -INF;
		Node::Queue ndque;
		ndque.push(nd);
		// Breadth-first walk: collect log_d for every internal node and
		// remember where target lands in the weight vector.
		while (!ndque.empty()) {
			nd = ndque.front();
			ndque.pop();
			if (!nd->IsLeaf()) {
				p.push_back(nd->st.log_d);
				if (nd == target) pos = p.size() - 1;
				max_log_d = std::max(max_log_d, nd->st.log_d);
				// An internal node always has two children to enqueue.
				ndque.push(nd->ch0);
				ndque.push(nd->ch1);
			}
		}
		// Smooth each weight with the maximum, then normalize in log space.
		double nc = -INF;
		for (int j = 0; j < p.size(); ++j) {
			p[j] = LogSumExp(p[j], max_log_d);
			nc = LogSumExp(p[j], nc);
		}
		return p[pos] - nc;
	}
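Note the step p[j] = LogSumExp(p[j], max_log_d) before normalization: a plain stable normalization would subtract max_log_d, whereas this mixes each weight with the maximum, i.e. p[j] becomes log(exp(p[j]) + exp(max_log_d)). Since every raw weight is at most the maximum, each normalized probability then comes out at least 1/(2n) for n candidates, so no node is starved by the proposal. That reading is an inference from the code (the same smoothing appears in the drawing form in the next example), not documented intent.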
Example #4
	// Draws a node from the subtree rooted at nd with probability
	// proportional to its smoothed weight (internal nodes only unless
	// draw_leaf is set), accumulating the log proposal probability
	// into log_trans.
	Node * TGMCMC::SampleSub(Node *nd, bool draw_leaf, 
		double &log_trans) const
	{
		if (nd->IsLeaf()) return nd;
		std::vector<Node*> ndvec;
		std::vector<double> p;
		double max_log_d = -INF;
		Node::Queue ndque;
		ndque.push(nd);
		// Breadth-first walk: collect the candidate nodes and their
		// log weights.
		while (!ndque.empty()) {
			nd = ndque.front();
			ndque.pop();
			if (draw_leaf || !nd->IsLeaf()) {
				ndvec.push_back(nd);
				p.push_back(nd->st.log_d);
				max_log_d = std::max(max_log_d, nd->st.log_d);
				if (!nd->IsLeaf()) {
					ndque.push(nd->ch0);
					ndque.push(nd->ch1);
				}
			}
		}
		// Smooth each weight with the maximum, then normalize in log space.
		double nc = -INF;
		for (int j = 0; j < p.size(); ++j) {
			p[j] = LogSumExp(p[j], max_log_d);
			nc = LogSumExp(p[j], nc);
		}
		for (int j = 0; j < p.size(); ++j)
			p[j] = exp(p[j] - nc);
		// Draw an index from the normalized weights.
		int j = RandMult(p);
		log_trans += log(p[j]);
		return ndvec[j];
	}
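The samplers here draw an index with RandMult, which these snippets do not define. A stand-in sketch, assuming it takes a normalized probability vector and returns a categorical draw:

#include <random>
#include <vector>

// Hypothetical stand-in for the RandMult(p) used above: returns index j
// with probability proportional to p[j].
int RandMult(const std::vector<double>& p) {
  static std::mt19937 gen{std::random_device{}()};
  std::discrete_distribution<int> dist(p.begin(), p.end());
  return dist(gen);
}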
Example #5
	// Evaluate this model at x
	double GaussMixtureEvaluator::EvaluateLog(const VecD& x) const {
		VecD ys(gm.ncomps);
		for (int i = 0; i < gm.ncomps; i++) {
			ys[i] = evaluators[i]->EvaluateLog(x) + logweights[i];
		}
		return LogSumExp(ys);
	}
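EvaluateLog passes a whole vector to LogSumExp, computing the log mixture density log(sum_i w_i N_i(x)) stably. A sketch of the vector overload it presumably calls (assumed signature, with std::vector standing in for VecD):

#include <cmath>
#include <limits>
#include <vector>

// Computes log(sum_i exp(ys[i])) by factoring out the maximum element.
double LogSumExp(const std::vector<double>& ys) {
  double m = -std::numeric_limits<double>::infinity();
  for (double y : ys) m = std::max(m, y);
  // Empty input or all components at log(0): the sum is zero.
  if (m == -std::numeric_limits<double>::infinity()) return m;
  double sum = 0.0;
  for (double y : ys) sum += std::exp(y - m);
  return m + std::log(sum);
}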
Example #6
void LSE_AddObjectiveAndGradient(AppCtx* context, PetscScalar* solution)
{
  timetype start;
  timetype finish;

  start = GET_TIME_METHOD();
  context->criteriaValues.lse = LogSumExp(context, solution);
  //ALERT("LSE = %f", addValue);
  finish = GET_TIME_METHOD();
  lseTime += finish - start;

  start = GET_TIME_METHOD();
  int nClusterVariables = 2 * context->ci->mCurrentNumberOfClusters;
  AddLogSumExpGradient(context, nClusterVariables, solution, context->criteriaValues.gLSE);
  finish = GET_TIME_METHOD();
  lseGradTime += finish - start;
}
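Here LogSumExp is a project-specific overload serving as a placement objective: log-sum-exp is the standard smooth, differentiable surrogate for max, with the softmax as its gradient. A generic sketch of that idea (a hypothetical helper, not the AppCtx-based API above):

#include <algorithm>
#include <cmath>
#include <vector>

// Smooth maximum lse_a(x) = a * log(sum_i exp(x_i / a)); as a -> 0 it
// approaches max_i x_i. Its gradient is the softmax of x / a.
// Assumes x is non-empty.
double SmoothMax(const std::vector<double>& x, double alpha,
                 std::vector<double>* grad) {
  double m = *std::max_element(x.begin(), x.end());
  double sum = 0.0;
  for (double v : x) sum += std::exp((v - m) / alpha);
  if (grad != nullptr) {
    grad->assign(x.size(), 0.0);
    for (size_t i = 0; i < x.size(); ++i)
      (*grad)[i] = std::exp((x[i] - m) / alpha) / sum;
  }
  return m + alpha * std::log(sum);
}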
Example #7
		for (int i = 0; i < ss_.size(); ++i)
			indices_[i] = i;
		ind_to_cl_.assign(ss_.size(), 0);
	}

	void NRMMGibbsSampler::SampleCl(int ind)
	{
		// One weight per existing cluster plus one for opening a new one.
		std::vector<double> p(clset_.size() + 1, 0);
		double nc = -INF;
		int j = 0;
		foreach(it, clset_) {
			p[j] = mu_->LogKappaJoin(it->n) + mu_->LogPred(ss_[ind], it->ss);
			nc = LogSumExp(nc, p[j++]);
		}
		p[j] = mu_->LogKappaNew() + mu_->LogMarginal(ss_[ind]);
		nc = LogSumExp(nc, p[j]);
		// Normalize in log space, then draw a cluster assignment.
		for (j = 0; j < p.size(); ++j)
			p[j] = exp(p[j] - nc);
		j = RandMult(p);
		if (j < clset_.size()) {
			// Join an existing cluster.
			Cluster *cl = *std::next(clset_.begin(), j);
			cl->Add(ss_[ind]);
			ind_to_cl_[ind] = cl;
		}
		else {
			// Open a new cluster for this observation.
			Cluster *new_cl = new Cluster(ss_[ind]);
			clset_.insert(new_cl);
			ind_to_cl_[ind] = new_cl;
		}
	}
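SampleCl follows the same normalize-then-draw pattern as Example #4: accumulate unnormalized log weights, reduce them with LogSumExp to get the log normalizer, exponentiate the differences, and sample. The shared pattern as a standalone sketch, reusing the two-argument LogSumExp and RandMult sketched earlier:

#include <cmath>
#include <limits>
#include <vector>

// Turns unnormalized log weights into probabilities that sum to one;
// every exponentiated difference is at most 1, so nothing overflows.
std::vector<double> NormalizeLogWeights(std::vector<double> logw) {
  double nc = -std::numeric_limits<double>::infinity();
  for (double w : logw) nc = LogSumExp(nc, w);  // Log of the total mass.
  for (double& w : logw) w = std::exp(w - nc);
  return logw;
}

// Usage: int j = RandMult(NormalizeLogWeights(logw));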