 // Args:
 //   rng:  The random number generator.
 //   response:  The value of the response variable for the observation
 //     being imputed.
 //   binary_inputs: The values of the inputs to the terminal layer (i.e. the
 //     outputs from the final hidden layer).  These will be updated by the
 //     imputation.
 //   logprob: On input this is a vector giving the marginal (un-logged)
 //     probability that each input node is active.  These values will be
 //     over-written by their logarithms.
 //   logprob_complement: On input this is any vector with size matching
 //     logprob.  On output its elements contain log(1 - exp(logprob)).
 //
 // Effects:
 //   The latent data for the terminal layer is imputed, and the sufficient
 //   statistics for the latent regression model in the terminal layer are
 //   updated to include the imputed data.
 void GFFPS::impute_terminal_layer_inputs(
     RNG &rng,
     double response,
     std::vector<bool> &binary_inputs,
     Vector &logprob,
     Vector &logprob_complement) {
   // Convert the marginal probabilities to the log scale, storing log(p) in
   // logprob and log(1 - p) in logprob_complement.
   for (int i = 0; i < logprob.size(); ++i) {
     logprob_complement[i] = log(1 - logprob[i]);
     logprob[i] = log(logprob[i]);
   }
   Vector terminal_layer_inputs(binary_inputs.size());
   Nnet::to_numeric(binary_inputs, terminal_layer_inputs);
   double logp_original = terminal_inputs_log_full_conditional(
       response, terminal_layer_inputs, logprob, logprob_complement);
   // Gibbs sweep: propose flipping each input bit, and accept the flip with
   // its full conditional probability.
   for (int i = 0; i < terminal_layer_inputs.size(); ++i) {
     terminal_layer_inputs[i] = 1 - terminal_layer_inputs[i];
     double logp = terminal_inputs_log_full_conditional(
         response, terminal_layer_inputs, logprob, logprob_complement);
     // Probability of the flipped configuration, normalized against the
     // current one.
     double log_input_prob = logp - lse2(logp, logp_original);
     double logu = log(runif_mt(rng));
     if (logu < log_input_prob) {
       logp_original = logp;
     } else {
       // Reject: flip the bit back.
       terminal_layer_inputs[i] = 1 - terminal_layer_inputs[i];
     }
   }
   model_->terminal_layer()->suf()->add_mixture_data(
       response, terminal_layer_inputs, 1.0);
   Nnet::to_binary(terminal_layer_inputs, binary_inputs);
 }
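
All three examples rely on lse2, which, judging from its use here (e.g. lse2(0, eta) as log(1 + exp(eta))), computes the log-sum-exp of two values. A minimal sketch of such a function, assuming it matches that contract (BOOM's actual implementation may differ):

#include <algorithm>
#include <cmath>

// Numerically stable log(exp(a) + exp(b)): factor out the larger argument so
// the remaining exponential cannot overflow.
double lse2_sketch(double a, double b) {
  double m = std::max(a, b);
  return m + std::log1p(std::exp(-std::fabs(a - b)));
}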
Example #2
  double logit_loglike_1(const Vec &beta, bool y, const Vec &x,
                         Vec *g, Mat *h, double mix_wgt) {
    double eta = x.dot(beta);
    double lognc = lse2(0, eta);  // log normalizing constant: log(1 + exp(eta))
    double ans = (y ? eta : 0) - lognc;
    if (g) {
      double p = exp(eta - lognc);  // success probability
      // Gradient contribution: mix_wgt * (y - p) * x.
      g->axpy(x, mix_wgt * (y - p));
      if (h) {
        double q = 1 - p;
        // Hessian contribution: -mix_wgt * p * (1 - p) * x * x^T.
        h->add_outer(x, x, -mix_wgt * p * q);
      }
    }
    return mix_wgt * ans;
  }
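
As a sanity check on the gradient used above (the y - p multiplier, with p = exp(eta - lognc)), here is a small self-contained program, separate from BOOM, that compares the analytic derivative of the logit log likelihood with a central finite difference in the scalar case:

#include <cmath>
#include <cstdio>

// Logit log likelihood for a single observation with linear predictor eta:
// y * eta - log(1 + exp(eta)).
double loglike(double eta, bool y) {
  double lognc = std::log1p(std::exp(eta));  // the lse2(0, eta) term above
  return (y ? eta : 0.0) - lognc;
}

int main() {
  double eta = 0.7;
  bool y = true;
  double p = 1.0 / (1.0 + std::exp(-eta));  // same as exp(eta - lognc)
  double h = 1e-6;
  double numeric = (loglike(eta + h, y) - loglike(eta - h, y)) / (2 * h);
  std::printf("analytic: %.6f  numeric: %.6f\n", y - p, numeric);
  return 0;
}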
Example #3
 void MlvsDataImputer::impute_latent_data_point(const ChoiceData &dp,
                                                SufficientStatistics *suf,
                                                RNG &rng) {
   model_->fill_eta(dp, eta);  // Fill eta with the linear predictor for each choice.
   if (downsampling_) eta += log_sampling_probs_;  // eta += downsampling log probability.
   uint M = model_->Nchoices();
   uint y = dp.value();
   assert(y < M);
   double loglam = lse(eta);  // log of the sum of the unnormalized choice weights
   double logzmin = rlexp_mt(rng, loglam);  // log of the smallest latent waiting time
   u[y] = -logzmin;  // The observed choice corresponds to the minimum.
   for (uint m = 0; m < M; ++m) {
     if (m != y) {
       // Unchosen categories have waiting times z_m = z_min + an independent
       // Exponential(exp(eta[m])) increment, added on the log scale.
       double tmp = rlexp_mt(rng, eta[m]);
       double logz = lse2(logzmin, tmp);
       u[m] = -logz;
     }
     // Attribute the latent utility u[m] to a component of the normal
     // mixture approximating its error distribution.
     uint k = unmix(rng, u[m] - eta[m]);
     u[m] -= mu_[k];
     wgts[m] = sigsq_inv_[k];
   }
   suf->update(dp, wgts, u);
 }
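
The imputation above stays on the log scale throughout: rlexp_mt appears to return the log of an exponential draw, and lse2 adds the two waiting times without exponentiating. A sketch of such a log-scale sampler, under the assumption (not verified against BOOM) that rlexp_mt(rng, loglam) draws log(z) with z ~ Exponential(exp(loglam)):

#include <cmath>
#include <random>

// If U ~ Uniform(0, 1) then -log(U) / lambda ~ Exponential(lambda), so with
// lambda = exp(loglam), log(z) = log(-log(U)) - loglam.
double rlexp_sketch(std::mt19937 &rng, double loglam) {
  std::uniform_real_distribution<double> unif(0.0, 1.0);
  return std::log(-std::log(unif(rng))) - loglam;
}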