double BLSSS::find_posterior_mode() {
   BinomialLogitUnNormalizedLogPosterior logpost(m_, pri_.get());
   const Selector &inc(m_->coef().inc());
   Vector beta(m_->included_coefficients());
   int dim = beta.size();
   if (dim == 0) {
     return negative_infinity();
      // TODO: This logic prohibits an empty model.  Better to return
      // the actual value of the un-normalized log posterior, which in
      // this case would just be the likelihood portion.
   } else {
     Vector gradient(dim);
     Matrix hessian(dim, dim);
     double logf;
     std::string error_message;
     bool ok = max_nd2_careful(beta,
                               gradient,
                               hessian,
                               logf,
                               Target(logpost),
                               dTarget(logpost),
                               d2Target(logpost),
                               1e-5,
                               error_message);
     if (ok) {
       m_->set_included_coefficients(beta, inc);
       return logf;
     } else {
       return negative_infinity();
     }
   }
 }
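For reference, this routine (and the structurally identical PRSS::find_posterior_mode below) is a Newton-style mode search over the included coefficients: it maximizes the un-normalized log posterior

\[
  \hat\beta = \arg\max_{\beta}\; \log p(y \mid \beta) + \log p(\beta \mid \gamma),
\]

where \gamma is the inclusion Selector. Judging from the call signature, max_nd2_careful reports the maximized value through logf and signals failure through its return value, in which case the model is left untouched and negative_infinity() is returned.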
Example #2
 MultivariateKalmanStorage(int observation_dim, int state_dim,
                           bool store_state_moments)
     : KalmanStateStorage(store_state_moments ? state_dim : 0),
       kalman_gain_(state_dim, observation_dim),
       forecast_precision_(observation_dim),
       forecast_precision_log_determinant_(negative_infinity()),
       forecast_error_(observation_dim) {}
 double SSLM::adjusted_observation(int t) const {
   if (is_missing_observation(t)) {
     return negative_infinity();
   }
   return dat()[t]->latent_data_value()
       - observation_model_->predict(dat()[t]->x());
 }
Example #4
  double BM::Loglike(const Vector &probvec, Vec &g, Mat &h, uint nd) const {
    if (probvec.size() != 1) {
      report_error("Wrong size argument.");
    }
    double p = probvec[0];
    if (p < 0 || p > 1) {
      return negative_infinity();
    }
    double logp = log(p);
    double logq = log(1 - p);

    double ntrials = n_ * suf()->nobs();
    double success = n_ * suf()->sum();
    double fail = ntrials - success;

    double ans = success * logp + fail * logq;

    if (nd > 0) {
      double q = 1 - p;
      g[0] = (success - p * ntrials) / (p * q);
      if (nd > 1) {
        h(0, 0) = -(success / (p * p) + fail / (q * q));
      }
    }
    return ans;
  }
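The quantities computed here are the binomial log likelihood and its first two derivatives.  With s successes in N trials,

\[
  \ell(p) = s \log p + (N - s)\log(1 - p), \qquad
  \ell'(p) = \frac{s - Np}{p(1 - p)}, \qquad
  \ell''(p) = -\left(\frac{s}{p^2} + \frac{N - s}{(1 - p)^2}\right).
\]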
  double ExponentialModel::Loglike(const Vector &lambda_vector, Vector &g,
                                   Matrix &h, uint nd) const {
    if (lambda_vector.size() != 1) {
      report_error("Wrong size argument.");
    }
    double lam = lambda_vector[0];
    double ans = 0;
    if (lam <= 0) {
      ans = negative_infinity();
      if (nd > 0) {
        // Point the gradient uphill, back toward the feasible region lam > 0.
        g[0] = std::max(fabs(lam), .10);
        if (nd > 1) {
          h(0, 0) = -1;
        }
      }
      return ans;
    }

    double n = suf()->n();
    double sum = suf()->sum();
    ans = n * log(lam) - lam * sum;
    if (nd > 0) {
      g[0] = n / lam - sum;
      if (nd > 1) {
        h(0, 0) = -n / (lam * lam);
      }
    }
    return ans;
  }
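The formulas implemented above are the exponential log likelihood and its derivatives:

\[
  \ell(\lambda) = n \log\lambda - \lambda \sum_i x_i, \qquad
  \ell'(\lambda) = \frac{n}{\lambda} - \sum_i x_i, \qquad
  \ell''(\lambda) = -\frac{n}{\lambda^2}.
\]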
Example #6
   /*======================================================================*/
   double dtriangle(double x, double x0, double x1, double xm,
                    bool logscale) {
     double m0, m1, y, ans;

     if (x1 < x0) {
       std::ostringstream err;
       err << "error in dtriangle: called with" << std::endl
           << "x0 = " << x0 << std::endl
           << "x1 = " << x1 << std::endl
           << "xm = " << xm << std::endl
           << "logscale = " << logscale << std::endl
           << "x0 must be less than x1";
       throw_exception<std::runtime_error>(err.str());
     }
     if (x0 == x1) return x0;

     if (x < x0 || x > x1) return (logscale ? negative_infinity() : 0);

     // If the mode lies outside [x0, x1], reset it to the midpoint.
     if (xm < x0 || xm > x1) xm = (x0 + x1) / 2.0;
     y = 2.0 / (x1 - x0);   // density value at the mode
     m0 = y / (xm - x0);    // slope of the rising edge
     m1 = y / (xm - x1);    // slope of the falling edge (negative)
     ans = (x < xm ? m0 * (x - x0) : m1 * (x - x1));
     return (logscale ? log(ans) : ans);
   }
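The density being evaluated is the triangular distribution on [x0, x1] with mode xm:

\[
  f(x) = \begin{cases}
    \dfrac{2 (x - x_0)}{(x_1 - x_0)(x_m - x_0)}, & x_0 \le x \le x_m, \\[1ex]
    \dfrac{2 (x - x_1)}{(x_1 - x_0)(x_m - x_1)}, & x_m < x \le x_1,
  \end{cases}
\]

which rises linearly from zero at x0 to the peak value 2/(x1 - x0) at xm, then falls linearly back to zero at x1.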
Example #7
double BetaPosteriorSampler::logpri()const {
    double mean = model_->mean();
    double sample_size = model_->sample_size();
    if (mean <= 0 || sample_size <= 0) {
        return negative_infinity();
    }
    return mean_prior_->logp(mean) + sample_size_prior_->logp(sample_size);
}
Example #8
  double WishartModel::Loglike(const Vector &sumsq_triangle_nu,
                               Vector &g, uint nd) const {
    const double log2 = 0.69314718055994529;
    const double logpi = 1.1447298858494002;
    int k = dim();
    SpdParams Sumsq_arg(dim());
    Vector::const_iterator it = Sumsq_arg.unvectorize(sumsq_triangle_nu, true);
    double nu = *it;
    const SpdMatrix &SS(Sumsq_arg.var());

    if (nu < k) return negative_infinity();

    bool ok = true;
    double ldSS = SS.logdet(ok);
    if (!ok) return negative_infinity();

    double n = suf()->n();
    double sumldw = suf()->sumldw();
    const SpdMatrix &sumW(suf()->sumW());

    double tab = traceAB(SS, sumW);
    // Accumulate the multivariate log gamma function and (if derivatives
    // are needed) its derivative with respect to nu.
    double tmp1(0), tmp2(0);
    for (int i = 1; i <= k; ++i) {
      double tmp = .5 * (nu - i + 1);
      tmp1 += lgamma(tmp);
      if (nd > 0) tmp2 += digamma(tmp);
    }

    double ans = .5 * (n * (-nu * k * log2 - .5 * k * (k - 1) * logpi
                            - 2 * tmp1 + nu * ldSS)
                       + (nu - k - 1) * sumldw - tab);
    if (nd > 0) {
      double dnu = .5 * (n * (-k * log2 - tmp2 + ldSS) + sumldw);

      SpdMatrix SSinv(SS.inv());
      int m = 0;
      // Gradient with respect to the lower triangle of SS.  Off-diagonal
      // elements appear twice in the trace and the log determinant, hence
      // the factor of 2.
      for (int i = 0; i < k; ++i) {
        for (int j = 0; j <= i; ++j) {
          g[m] = .5 * n * nu * (i == j ? SSinv(i, i) : 2 * SSinv(i, j));
          g[m] -= .5 * (i == j ? sumW(i, i) : 2 * sumW(i, j));
          ++m;
        }
      }
      g[m] = dnu;
    }
    return ans;
  }
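The return value matches the Wishart log likelihood for n observed k-by-k matrices W_i, with SS playing the role of the inverse scale matrix:

\[
  \ell(S, \nu) = \frac{1}{2}\Bigl[
    n\Bigl(\nu \log|S| - \nu k \log 2 - \tfrac{k(k-1)}{2}\log\pi
      - 2\sum_{i=1}^{k}\log\Gamma\bigl(\tfrac{\nu - i + 1}{2}\bigr)\Bigr)
    + (\nu - k - 1)\sum_i \log|W_i|
    - \operatorname{tr}\Bigl(S \sum_i W_i\Bigr)\Bigr].
\]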
Example #9
  double pig(double x, double mu, double lambda, bool logscale) {
    if (x <= 0) return logscale ? negative_infinity() : 0;
    if (mu <= 0) throw_exception<std::runtime_error>("mu <= 0 in pig");
    if (lambda <= 0) throw_exception<std::runtime_error>("lambda <= 0 in pig");

    double rlx = sqrt(lambda / x);
    double xmu = x / mu;
    double ans = pnorm(rlx * (xmu - 1))
        + exp(2 * lambda / mu) * pnorm(-rlx * (xmu + 1));
    return logscale ? log(ans) : ans;
  }
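pig evaluates the standard closed form for the inverse Gaussian CDF:

\[
  F(x;\mu,\lambda) = \Phi\Bigl(\sqrt{\tfrac{\lambda}{x}}\bigl(\tfrac{x}{\mu}-1\bigr)\Bigr)
    + e^{2\lambda/\mu}\,\Phi\Bigl(-\sqrt{\tfrac{\lambda}{x}}\bigl(\tfrac{x}{\mu}+1\bigr)\Bigr).
\]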
Example #10
  double dig(double x, double mu, double lambda, bool logscale) {
    const double log_two_pi(1.83787706640935);
    if (x <= 0) return logscale ? negative_infinity() : 0;
    if (mu <= 0) throw_exception<std::runtime_error>("mu <= 0 in dig");
    if (lambda <= 0) throw_exception<std::runtime_error>("lambda <= 0 in dig");

    double ans = -lambda * pow(x - mu, 2) / (2 * mu * mu * x);
    ans += .5 * (log(lambda) - log_two_pi - 3 * log(x));
    return logscale ? ans : exp(ans);
  }
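dig works on the log scale of the inverse Gaussian density

\[
  f(x;\mu,\lambda) = \sqrt{\frac{\lambda}{2\pi x^3}}
    \exp\Bigl(-\frac{\lambda (x-\mu)^2}{2\mu^2 x}\Bigr),
\]

exponentiating only when a density value (rather than a log density) is requested.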
//======================================================================
// One 'week' of data, which may or may not contain an observed
// monthly total.
FineNowcastingData::FineNowcastingData(
    const Vec &x,
    double coarse_observation,
    bool coarse_observation_observed,
    bool contains_end,
    double fraction_of_value_in_initial_period)
    : x_(new RegressionData(negative_infinity(), x)),
      coarse_observation_(coarse_observation),
      coarse_observation_observed_(coarse_observation_observed),
      contains_end_(contains_end),
      fraction_in_initial_period_(fraction_of_value_in_initial_period)
{}
Example #12
 double Bspline::knot(int i) const {
   if (knots_.empty()) {
     return negative_infinity();
   } else {
     if (i <= 0) {
       return knots_[0];
      } else if (i >= static_cast<int>(knots_.size())) {
       return knots_.back();
     } else {
       return knots_[i];
     }
   }
 }
Example #13
  double beta_log_likelihood(double a, double b, const BetaSuf &suf){
    if (a <= 0 || b <= 0) {
      return negative_infinity();
    }

    double n = suf.n();
    double sumlog = suf.sumlog();
    double sumlogc = suf.sumlogc();

    double ans = n * (lgamma(a + b) - lgamma(a) - lgamma(b));
    ans += (a - 1) * sumlog + (b - 1) * sumlogc;
    return ans;
  }
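This is the beta log likelihood

\[
  \ell(a, b) = n\bigl[\log\Gamma(a+b) - \log\Gamma(a) - \log\Gamma(b)\bigr]
    + (a-1)\sum_i \log x_i + (b-1)\sum_i \log(1 - x_i),
\]

with \sum_i \log x_i and \sum_i \log(1 - x_i) taken from the sufficient statistics.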
 double ExponentialModel::Logp(double x, double &g, double &h, uint nd) const {
   double lam = this->lam();
   if (lam <= 0) {
     return negative_infinity();
   }
   double ans = x < 0 ? negative_infinity() : log(lam) - lam * x;
   if (nd > 0) {
     // lam > 0 is guaranteed by the early return above.
     g = 1.0 / lam - x;
     if (nd > 1) {
       h = -1.0 / (lam * lam);
     }
   }
   return ans;
 }
  double PRSS::find_posterior_mode() {
    const Selector &included(model_->inc());
    d2TargetFunPointerAdapter logpost(
        boost::bind(&PoissonRegressionModel::log_likelihood, model_,
                    _1, _2, _3, _4),
        boost::bind(&MvnBase::logp_given_inclusion, slab_prior_.get(),
                    _1, _2, _3, included, _4));
    Vector beta = model_->included_coefficients();
    int dim = beta.size();
    if (dim == 0) {
      return negative_infinity();
      // TODO: This logic prohibits an empty model.  Better to return
      // the actual value of the un-normalized log posterior, which in
      // this case would just be the likelihood portion.
    }

    Vector gradient(dim);
    Spd hessian(dim);
    double logf;
    std::string error_message;
    bool ok = max_nd2_careful(beta,
                              gradient,
                              hessian,
                              logf,
                              Target(logpost),
                              dTarget(logpost),
                              d2Target(logpost),
                              1e-5,
                              error_message);
    if (ok) {
      model_->set_included_coefficients(beta, included);
      return logf;
    } else {
      return negative_infinity();
    }
  }
  //----------------------------------------------------------------------
  // Omits the constant factors (2*pi)^{-N/2} and exp{-.5 * (N - 1) * s^2}
  // from the integrated likelihood.
  double ProbitBartPosteriorSampler::log_integrated_probit_likelihood(
      const Bart::ProbitSufficientStatistics &suf) const {
    double n = suf.sample_size();
    if (n <= 0) {
      return negative_infinity();
    }
    double ybar = suf.sum() / n;  // n > 0 was checked above.
    double prior_variance = mean_prior_variance();

    double ivar = n + (1.0 / prior_variance);
    double posterior_variance = 1.0 / ivar;
    double posterior_mean = suf.sum() / ivar;
    double ans = log(posterior_variance / prior_variance)
        - n * square(ybar)
        + square(posterior_mean) / posterior_variance;
    return .5 * ans;
  }
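The computation is a conjugate normal update for the latent probit scores z_1, ..., z_n, which have unit residual variance and a mean-zero prior with variance v:

\[
  \log \int \prod_i N(z_i \mid \mu, 1)\, N(\mu \mid 0, v)\, d\mu
  = \frac{1}{2}\Bigl[\log\frac{v_{\text{post}}}{v} - n\bar z^2
      + \frac{\mu_{\text{post}}^2}{v_{\text{post}}}\Bigr] + \text{const},
\]

with 1/v_post = n + 1/v and mu_post = v_post * sum_i z_i.  The constant is the (2*pi)^{-N/2} exp{-.5 * (N - 1) * s^2} factor noted in the comment above.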
Example #17
 double ZGS::log_prior(double sigsq, double *d1, double *d2) const {
   if (sigsq <= 0.0) {
     return negative_infinity();
   }
   double a = precision_prior_->alpha();
   double b = precision_prior_->beta();
   // The log prior is the log gamma density evaluated at 1/sigsq, plus a
   // jacobian term: log(abs(d(siginv) / d(sigsq))) = -2 * log(sigsq).
   if (d1) {
     double sig4 = sigsq * sigsq;
     *d1 += -(a + 1) / sigsq + b / sig4;
     if (d2) {
       double sig6 = sigsq * sig4;
       *d2 += (a + 1) / sig4 - 2 * b / sig6;
     }
   }
   return dgamma(1 / sigsq, a, b, true) - 2 * log(sigsq);
 }
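In symbols, with 1/\sigma^2 \sim \mathrm{Gamma}(a, b), the change of variables gives

\[
  \log p(\sigma^2) = \log \mathrm{dgamma}(1/\sigma^2, a, b) - 2\log\sigma^2,
\]

whose first and second derivatives with respect to \sigma^2 are -(a+1)/\sigma^2 + b/\sigma^4 and (a+1)/\sigma^4 - 2b/\sigma^6, matching the increments applied to *d1 and *d2 above.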
Example #18
 double PoissonModel::Loglike(const Vector &lambda_vector,
                              Vec &g, Mat &h, uint nd) const {
   if (lambda_vector.size() != 1) {
     report_error("Wrong size argument.");
   }
   double lam = lambda_vector[0];
   if (lam < 0) {
     return negative_infinity();
   }
   Ptr<PoissonSuf> s = suf();
   double sm = s->sum();
   double n = s->n();
   double ans = sm * log(lam) - n * lam - s->lognc();
   if (nd > 0) {
     g[0] = sm / lam - n;
     if (nd > 1) h(0, 0) = -sm / (lam * lam);
   }
   return ans;
 }
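The Poisson log likelihood and derivatives being computed are

\[
  \ell(\lambda) = \Bigl(\sum_i x_i\Bigr)\log\lambda - n\lambda - c, \qquad
  \ell'(\lambda) = \frac{\sum_i x_i}{\lambda} - n, \qquad
  \ell''(\lambda) = -\frac{\sum_i x_i}{\lambda^2},
\]

where c (held in suf()->lognc()) is presumably the data-dependent normalizing constant \sum_i \log(x_i!).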
  double GaussianBartPosteriorSampler::log_integrated_gaussian_likelihood(
      const Bart::GaussianBartSufficientStatistics &suf) const {
    double n = suf.n();
    if (n < 5) {
      return negative_infinity();
    }
    double prior_variance = mean_prior_variance();
    double sigsq = model_->sigsq();

    double ybar = suf.ybar();
    double sample_variance = suf.sample_var();

    double ivar = (n / sigsq) + (1.0 / prior_variance);
    double posterior_variance = 1.0 / ivar;
    double posterior_mean = (n * ybar / sigsq) / ivar;

    double ans = -n * (log_2_pi + log(sigsq)) +
                 log(posterior_variance / prior_variance) -
                 (n - 1) * sample_variance / sigsq - n * square(ybar) / sigsq +
                 square(posterior_mean) / posterior_variance;
    return .5 * ans;
  }
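This is the Gaussian analogue of the probit computation above.  Integrating the node mean \mu out of \prod_i N(y_i \mid \mu, \sigma^2) against a mean-zero prior N(0, v) gives

\[
  \frac{1}{2}\Bigl[-n\bigl(\log 2\pi + \log\sigma^2\bigr)
    + \log\frac{v_{\text{post}}}{v}
    - \frac{(n-1)s^2 + n\bar y^2}{\sigma^2}
    + \frac{\mu_{\text{post}}^2}{v_{\text{post}}}\Bigr],
\]

with 1/v_post = n/\sigma^2 + 1/v and \mu_post = v_post * n\bar y / \sigma^2, which is exactly what the code evaluates.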
Example #20
  double BM::Loglike(const Vector &ab, Vec &g, Mat &h, uint nd) const{
    if (ab.size() != 2) {
      report_error("Wrong size argument.");
    }
    double alpha = ab[0];
    double beta = ab[1];
    if (alpha <= 0 || beta <= 0) {
      if (nd > 0) {
        // Point the gradient back toward the interior of the parameter space.
        g[0] = (alpha <= 0) ? 1.0 : 0.0;
        g[1] = (beta <= 0) ? 1.0 : 0.0;
        if (nd > 1) {
          h = 0.0;
          h.diag() = -1.0;
        }
      }
      return negative_infinity();
    }

    double n = suf()->n();
    double sumlog = suf()->sumlog();
    double sumlogc = suf()->sumlogc();

    double ans = n * (lgamma(alpha + beta) - lgamma(alpha) - lgamma(beta));
    ans += (alpha - 1) * sumlog + (beta - 1) * sumlogc;

    if (nd > 0) {
      double psisum = digamma(alpha + beta);
      g[0] = n * (psisum - digamma(alpha)) + sumlog;
      g[1] = n * (psisum - digamma(beta)) + sumlogc;

      if (nd > 1) {
        double trisum = trigamma(alpha + beta);
        h(0, 0) = n * (trisum - trigamma(alpha));
        h(0, 1) = h(1, 0) = n * trisum;
        h(1, 1) = n * (trisum - trigamma(beta));
      }
    }
    return ans;
  }
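The gradient and Hessian follow from differentiating the beta log likelihood (shown after beta_log_likelihood above):

\[
  \frac{\partial \ell}{\partial a} = n\bigl[\psi(a+b) - \psi(a)\bigr] + \sum_i \log x_i, \qquad
  \frac{\partial^2 \ell}{\partial a^2} = n\bigl[\psi'(a+b) - \psi'(a)\bigr], \qquad
  \frac{\partial^2 \ell}{\partial a\,\partial b} = n\,\psi'(a+b),
\]

where \psi and \psi' are the digamma and trigamma functions; the derivatives in b swap a for b and \sum \log x_i for \sum \log(1 - x_i).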
Example #21
 double ArPosteriorSampler::logpri() const {
   bool ok = model_->check_stationary(model_->phi());
   if (!ok) return negative_infinity();
   return siginv_prior_->logp(1.0 / model_->sigsq());
 }
 double GFFPS::logpri() const {
   report_error("Not yet implemented");
   return negative_infinity();
 }
Example #23
  void LiuWestParticleFilter::update(RNG &rng,
                                     const Data &observation,
                                     int observation_time) {
    //====== Step 1
    // Compute the means and variances to be used in the kernel density
    // estimate.
    std::vector<Vector> predicted_state_mean;
    predicted_state_mean.reserve(number_of_particles());
    MvnSuf suf(parameter_particles_[0].size());
    for (int i = 0; i < number_of_particles(); ++i) {
      suf.update_raw(parameter_particles_[i]);
      predicted_state_mean.push_back(hmm_->predicted_state_mean(
          state_particles_[i], observation_time, parameter_particles_[i]));
    }
    Vector parameter_mean = suf.ybar();
    Vector kernel_weights(number_of_particles());
    double max_log_weight = negative_infinity();
    std::vector<Vector> predicted_parameter_mean(number_of_particles());
    // Liu-West shrinkage: pull each particle toward the overall mean so that
    // the kernel mixture preserves the mean and variance of the particle
    // cloud.
    double particle_weight = sqrt(1 - square(kernel_scale_factor_));
    for (int i = 0; i < number_of_particles(); ++i) {
      predicted_parameter_mean[i] = 
          particle_weight * parameter_particles_[i]
          + (1 - particle_weight) * parameter_mean;
      kernel_weights[i] = log_weights_[i] + hmm_->log_observation_density(
          observation,
          predicted_state_mean[i],
          observation_time,
          predicted_parameter_mean[i]);
      if (!std::isfinite(kernel_weights[i])) {
        kernel_weights[i] = negative_infinity();
      }
      max_log_weight = std::max<double>(max_log_weight, kernel_weights[i]);
    }
    double total_weight = 0;
    for (int i = 0; i < number_of_particles(); ++i) {
      double weight = exp(kernel_weights[i] - max_log_weight);
      if (!std::isfinite(weight)) {
        weight = 0;
      }
      total_weight += weight;
      kernel_weights[i] = weight;
    }
    kernel_weights /= total_weight;

    SpdMatrix sample_variance = suf.sample_var();
    if (sample_variance.rank() < sample_variance.nrow()) {
      // TODO: The parameter particles have degenerated to a rank-deficient
      // distribution.  Refresh the parameter distribution here.
    }
    Cholesky sample_variance_cholesky(sample_variance);
    if (!sample_variance_cholesky.is_pos_def()) {
      report_error("Sample variance is not positive definite.");
    }
    Matrix variance_cholesky =
        kernel_scale_factor_ * sample_variance_cholesky.getL();

    //===== Step 2:
    // Propose new values of state and parameters, and update the weights.
    //
    // Space is needed for the new proposals, because sampling and updating is
    // done with replacement.
    std::vector<Vector> new_state_particles(number_of_particles());
    Vector new_log_weights(number_of_particles());
    for (int i = 0; i < number_of_particles(); ++i) {
      int particle = rmulti_mt(rng, kernel_weights);
      Vector parameter_proposal =
          rmvn_L_mt(rng, predicted_parameter_mean[particle], variance_cholesky);
      try {
        Vector state_proposal = hmm_->simulate_transition(
            rng, state_particles_[particle], observation_time - 1, parameter_proposal);
        parameter_particles_[i] = parameter_proposal;
        new_state_particles[i] = state_proposal;
        new_log_weights[i] =
            hmm_->log_observation_density(observation,
                                          state_proposal,
                                          observation_time,
                                          parameter_proposal)
            - hmm_->log_observation_density(observation,
                                            predicted_state_mean[particle],
                                            observation_time,
                                            predicted_parameter_mean[particle]);
        if (!std::isfinite(new_log_weights[i])) {
          new_log_weights[i] = negative_infinity();
        }
      } catch (...) {
        // If we're here it means the parameter proposal was an illegal value.
        // Stuff the new_state_particles[i] entry with the right-sized garbage,
        // and set the weight to zero (i.e. set the log weight to negative
        // infinity).
        new_state_particles[i] = state_particles_[i];
        new_log_weights[i] = negative_infinity();
      }
    }
    std::swap(new_state_particles, state_particles_);
    std::swap(new_log_weights, log_weights_);
  }
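The proposal in Step 1 is the Liu-West kernel shrinkage rule.  Writing h for kernel_scale_factor_ and a = sqrt(1 - h^2), each particle's kernel mean is

\[
  m_i = a\,\theta_i + (1 - a)\,\bar\theta,
\]

and the kernel covariance is h^2 times the sample covariance, so the mixture of kernels reproduces the mean and covariance of the raw particle cloud instead of inflating it.  Step 2 is an auxiliary-particle-filter reweighting: each proposal's log weight is the log observation density at the proposed state and parameters minus the log density at the kernel means used to select the particle.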