SLLPS::SharedLocalLevelPosteriorSampler(
      SharedLocalLevelStateModel *model,
      const std::vector<Ptr<GammaModelBase>> &innovation_precision_priors,
      const Matrix &coefficient_prior_mean,
      double observation_coefficient_prior_sample_size,
      RNG &seeding_rng)
      : PosteriorSampler(seeding_rng),
        model_(model),
        innovation_precision_priors_(innovation_precision_priors)
  {
    if (innovation_precision_priors_.size() != model_->state_dimension()) {
      std::ostringstream err;
      err << "The model has state dimension " << model_->state_dimension()
          << " but " << innovation_precision_priors_.size()
          << " priors were passed.  They should match.";
      report_error(err.str());
    }

    for (int i = 0; i < innovation_precision_priors_.size(); ++i) {
      variance_samplers_.emplace_back(innovation_precision_priors_[i]);
    }

    observation_coefficient_sampler_.reset(
        new MultivariateRegressionSampler(
            model_->coefficient_model().get(),
            coefficient_prior_mean,
            observation_coefficient_prior_sample_size,
            1.0,
            SpdMatrix(innovation_precision_priors_.size()),
            rng()));
  }
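
The constructor requires exactly one innovation precision prior per state dimension. Below is a minimal sketch, not taken from the library, of how that prior vector might be assembled; state_model is an assumed pointer to an existing SharedLocalLevelStateModel, and the Gamma(1, 1) hyperparameters are placeholder choices.

  // Sketch only: 'state_model' and the Gamma(1, 1) hyperparameters are
  // assumptions, not taken from the source.
  std::vector<Ptr<GammaModelBase>> innovation_precision_priors;
  for (int i = 0; i < state_model->state_dimension(); ++i) {
    innovation_precision_priors.push_back(new GammaModel(1.0, 1.0));
  }
  // The vector now has state_dimension() elements, so the size check in the
  // constructor above will pass.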
 BinomialLogitSamplerRwm::BinomialLogitSamplerRwm(BinomialLogitModel *model,
                                                  Ptr<MvnBase> prior,
                                                  double nu,
                                                  RNG &seeding_rng)
      : PosteriorSampler(seeding_rng),
        m_(model),
        pri_(prior),
        proposal_(new MvtRwmProposal(SpdMatrix(model->xdim(), 1.0), nu)),
        sam_(BinomialLogitLogPosterior(m_, pri_), proposal_)
 {}
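
A sketch of how this sampler might be wired up, assuming logit_model is an existing BinomialLogitModel pointer; the vague Mvn prior, the proposal degrees of freedom (3.0), and the other names below are illustrative assumptions rather than code from the library.

 int xdim = logit_model->xdim();
 SpdMatrix prior_variance(xdim);
 prior_variance.set_diag(100.0);     // vague prior variance; an assumption
 NEW(MvnModel, coefficient_prior)(Vector(xdim, 0.0), prior_variance);
 NEW(BinomialLogitSamplerRwm, sampler)(
     logit_model, coefficient_prior, 3.0, GlobalRng::rng);
 logit_model->set_method(sampler);
 logit_model->sample_posterior();    // one random walk Metropolis update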
 void NeRegSuf::Update(const RegressionData &rdp) {
   ++n_;
   int p = rdp.xdim();
   // Lazily size the accumulators the first time data arrive.
   if (xtx_.nrow() == 0 || xtx_.ncol() == 0) xtx_ = SpdMatrix(p, 0.0);
   if (xty_.size() == 0) xty_ = Vector(p, 0.0);
   if (x_column_sums_.size() == 0) x_column_sums_ = Vector(p, 0.0);
   const Vector &tmpx(rdp.x());  // add_intercept(rdp.x());
   double y = rdp.y();
   xty_.axpy(tmpx, y);           // xty_ += y * x
   if (!xtx_is_fixed_) {
     // add_outer is called without forcing symmetry, so the matrix is
     // flagged for reflection before it is used.
     xtx_.add_outer(tmpx, 1.0, false);
     needs_to_reflect_ = true;
   }
   sumsqy += y * y;
   sumy_ += y;
   x_column_sums_.axpy(tmpx, 1.0);
 }
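
Each call folds a single observation into the accumulated cross products. A small illustrative use, assuming suf is an existing NeRegSuf object; the data values are made up.

   Vector x(3);
   x[0] = 1.0;  x[1] = 2.3;  x[2] = -0.7;   // predictors for one observation
   double y = 1.9;                          // response
   NEW(RegressionData, data_point)(y, x);
   suf.Update(*data_point);   // folds x and y into xtx_, xty_, and the column sums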
    //======================================================================
    ISAM::DafePcrItemSampler(Ptr<PCR> Mod, Ptr<DafePcrDataImputer> Imp,
                             Ptr<MvnModel> Prior, double Tdf, RNG &seeding_rng)
        : PosteriorSampler(seeding_rng),
          mod(Mod),
          prior(Prior),
          imp(Imp),
          sigsq(1.644934066848226)  // pi^2/6, the variance of a standard extreme value (Gumbel) deviate
    {
      Matrix X(Mod->X(1.0));
      xtx = SpdMatrix(X.ncol());
      xtu = Vector(X.ncol());

      // Set up a Metropolis-Hastings sampler for the item parameters: the
      // target is the item-level log posterior, and the proposal is a
      // multivariate-t independence proposal centered at zero with a unit
      // diagonal Ominv and Tdf degrees of freedom.
      ItemDafeTF target(mod, prior, imp);
      uint dim = mod->beta().size();
      SpdMatrix Ominv(dim);
      Ominv.set_diag(1.0);
      prop = new MvtIndepProposal(Vector(dim), Ominv, Tdf);
      sampler = new MetropolisHastings(target, prop);
    }
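
A heavily hedged sketch of how one of these item samplers might be attached, assuming item is an existing Ptr<PCR>, imputer is a shared Ptr<DafePcrDataImputer>, and 3.0 is an arbitrary choice for the proposal degrees of freedom; none of these names come from the source.

      // Hypothetical wiring; every name here is an assumption.
      uint dim = item->beta().size();
      SpdMatrix prior_variance(dim);
      prior_variance.set_diag(1.0);
      NEW(MvnModel, beta_prior)(Vector(dim, 0.0), prior_variance);
      NEW(DafePcrItemSampler, item_sampler)(
          item, imputer, beta_prior, 3.0, GlobalRng::rng);
      item->set_method(item_sampler);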
  Vector StateSpaceLogitModel::one_step_holdout_prediction_errors(
      RNG &rng,
      BinomialLogitDataImputer &data_imputer,
      const Vector &successes,
      const Vector &trials,
      const Matrix &predictors,
      const Vector &final_state) {
    if (nrow(predictors) != successes.size()
        || trials.size() != successes.size()) {
      report_error("Size mismatch in arguments provided to "
                   "one_step_holdout_prediction_errors.");
    }
    Vector ans(successes.size());
    int t0 = dat().size();
    ScalarKalmanStorage ks(state_dimension());
    ks.a = *state_transition_matrix(t0 - 1) * final_state;
    ks.P = SpdMatrix(state_variance_matrix(t0 - 1)->dense());

    // This function differs from the Gaussian case because the
    // response is on the binomial scale while the state model is on
    // the logit scale.  Because of the nonlinearity, the uncertainty
    // about the forecast must be incorporated into the prediction for
    // the observation.  We do this by imputing the latent logit and
    // its mixture indicator for each observation.  The strategy is,
    // for each observation:
    //   1) Simulate the next state.
    //   2) Simulate the latent logit w_t given the state.
    //   3) Kalman-update the state given w_t.
    for (int t = 0; t < ans.size(); ++t) {
      bool missing = false;
      Vector state = rmvn(ks.a, ks.P);

      double state_contribution = observation_matrix(t+t0).dot(state);
      double regression_contribution =
          observation_model_->predict(predictors.row(t));
      double mu = state_contribution + regression_contribution;
      double prediction = trials[t] * plogis(mu);
      ans[t] = successes[t] - prediction;

      // ans[t] is a random draw of the one step ahead prediction
      // error at time t0+t given observed data to time t0+t-1.  We
      // now proceed with the steps needed to update the Kalman filter
      // so we can compute ans[t+1].

      double precision_weighted_sum, total_precision;
      std::tie(precision_weighted_sum, total_precision) = data_imputer.impute(
          rng,
          trials[t],
          successes[t],
          mu);
      double latent_observation = precision_weighted_sum / total_precision;
      double latent_variance = 1.0 / total_precision;

      // The latent state was drawn from its predictive distribution
      // given Y[t0 + t -1] and used to impute the latent data for
      // y[t0+t].  That latent data is now used to update the Kalman
      // filter for the next time period.  It is important that we
      // discard the imputed state at this point.
      sparse_scalar_kalman_update(latent_observation - regression_contribution,
                                  ks.a,
                                  ks.P,
                                  ks.K,
                                  ks.F,
                                  ks.v,
                                  missing,
                                  observation_matrix(t + t0),
                                  latent_variance,
                                  *state_transition_matrix(t + t0),
                                  *state_variance_matrix(t + t0));
    }
    return ans;
  }
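
A sketch of a call site for the holdout evaluation above, assuming model is a fitted StateSpaceLogitModel, imputer is a concrete BinomialLogitDataImputer, final_state is a draw of the state vector at the end of the training period, and the holdout_* objects hold the new data; all of those names are assumptions.

  Vector errors = model.one_step_holdout_prediction_errors(
      GlobalRng::rng,       // RNG used for the latent-data imputation
      imputer,              // concrete BinomialLogitDataImputer
      holdout_successes,    // Vector of success counts
      holdout_trials,       // Vector of trial counts, same length
      holdout_predictors,   // Matrix with one row per holdout time point
      final_state);         // state draw at the last training time point
  // errors[t] is successes[t] minus trials[t] * plogis(mu[t]): a draw of the
  // one step ahead prediction error at time t0 + t.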
 // Convert an R matrix (assumed to be symmetric and positive definite) into a
 // BOOM SpdMatrix.
 Spd ToBoomSpd(SEXP m) {
   return SpdMatrix(ToBoomMatrixView(m));
 }
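
A hypothetical call site, assuming r_sigma is an SEXP containing a symmetric positive definite numeric matrix supplied from R:

 SpdMatrix Sigma = ToBoomSpd(r_sigma);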
 void QrRegSuf::clear() {
   sumsqy = 0;
   Qty = 0;  // set every element of Qty to zero, keeping its size
   // Rebuild the QR decomposition from a zero matrix of the same dimension.
   qr = QR(SpdMatrix(Qty.size(), 0.0));
 }