Vector SSLM::simulate_forecast(const Matrix &forecast_predictors,
                               const Vector &trials,
                               const Vector &final_state) {
  StateSpaceModelBase::set_state_model_behavior(StateModel::MARGINAL);
  Vector ans(nrow(forecast_predictors));
  Vector state = final_state;
  int t0 = dat().size();
  for (int t = 0; t < ans.size(); ++t) {
    state = simulate_next_state(state, t + t0);
    double eta = observation_matrix(t + t0).dot(state)
        + observation_model_->predict(forecast_predictors.row(t));
    double probability = plogis(eta);
    ans[t] = rbinom(lround(trials[t]), probability);
  }
  return ans;
}
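// A minimal, self-contained sketch of the per-period forecast step in the
// loop above: combine the state and regression contributions on the logit
// scale, map to a probability with the logistic link, and draw a binomial
// outcome.  This is an illustration only; the function name, the use of
// <random>, and the argument names are assumptions and not part of the model
// class, which uses the library's plogis and rbinom instead.
#include <cmath>
#include <random>

inline long simulate_one_forecast_period(double state_contribution,
                                         double regression_contribution,
                                         long n_trials,
                                         std::mt19937 &rng) {
  double eta = state_contribution + regression_contribution;  // logit scale
  double probability = 1.0 / (1.0 + std::exp(-eta));          // plogis(eta)
  std::binomial_distribution<long> draw(n_trials, probability);
  return draw(rng);                                           // y ~ Binomial(n, p)
}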
KalmanTrace::KalmanTrace(const KalmanMetaInfo &meta)
    : meta(meta),
      have_estimate(false),
      have_prediction(false) {
  observation_matrix.setZero();
  for (int i = 0; i < Dimensions; ++i) {
    observation_matrix(i, i) = 1;
  }
}
Vector StateSpaceLogitModel::one_step_holdout_prediction_errors(
    RNG &rng,
    BinomialLogitDataImputer &data_imputer,
    const Vector &successes,
    const Vector &trials,
    const Matrix &predictors,
    const Vector &final_state) {
  if (nrow(predictors) != successes.size()
      || trials.size() != successes.size()) {
    report_error("Size mismatch in arguments provided to "
                 "one_step_holdout_prediction_errors.");
  }
  Vector ans(successes.size());
  int t0 = dat().size();
  ScalarKalmanStorage ks(state_dimension());
  ks.a = *state_transition_matrix(t0 - 1) * final_state;
  ks.P = SpdMatrix(state_variance_matrix(t0 - 1)->dense());

  // This function differs from the Gaussian case because the response is on
  // the binomial scale while the state model is on the logit scale.  Because
  // of the nonlinearity, we need to incorporate the uncertainty about the
  // forecast into the prediction for the observation.  We do this by imputing
  // the latent logit and its mixture indicator for each observation.
  //
  // The strategy for each observation is:
  //   1) Simulate the next state.
  //   2) Simulate w_t given the state.
  //   3) Kalman update the state given w_t.
  for (int t = 0; t < ans.size(); ++t) {
    bool missing = false;
    Vector state = rmvn(ks.a, ks.P);
    double state_contribution = observation_matrix(t + t0).dot(state);
    double regression_contribution =
        observation_model_->predict(predictors.row(t));
    double mu = state_contribution + regression_contribution;
    double prediction = trials[t] * plogis(mu);
    ans[t] = successes[t] - prediction;

    // ans[t] is a random draw of the one-step-ahead prediction error at time
    // t0 + t given observed data to time t0 + t - 1.  We now proceed with the
    // steps needed to update the Kalman filter so we can compute ans[t + 1].
    double precision_weighted_sum, total_precision;
    std::tie(precision_weighted_sum, total_precision) =
        data_imputer.impute(rng, trials[t], successes[t], mu);
    double latent_observation = precision_weighted_sum / total_precision;
    double latent_variance = 1.0 / total_precision;

    // The latent state was drawn from its predictive distribution given
    // Y[t0 + t - 1] and used to impute the latent data for y[t0 + t].  That
    // latent data is now used to update the Kalman filter for the next time
    // period.  It is important that we discard the imputed state at this
    // point.
    sparse_scalar_kalman_update(latent_observation - regression_contribution,
                                ks.a,
                                ks.P,
                                ks.K,
                                ks.F,
                                ks.v,
                                missing,
                                observation_matrix(t + t0),
                                latent_variance,
                                *state_transition_matrix(t + t0),
                                *state_variance_matrix(t + t0));
  }
  return ans;
}
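// For reference, the scalar-observation Kalman recursion assumed to underlie
// sparse_scalar_kalman_update is, in Durbin & Koopman notation,
//   v = y - Z'a,  F = Z'PZ + H,  K = TPZ / F,
//   a <- Ta + Kv,  P <- TPT' - KFK' + RQR'.
// The sketch below is an illustration only (it is not the library routine),
// specialized to a one-dimensional state so the algebra reads as scalars; in
// the function above a, P, Z, T, and RQR are vector/matrix valued.
struct ScalarStateKalman {
  double a;  // one-step-ahead state mean
  double P;  // one-step-ahead state variance
};

inline double scalar_state_kalman_update_sketch(double y, ScalarStateKalman &ks,
                                                double Z, double H,
                                                double T, double RQR) {
  double v = y - Z * ks.a;                 // prediction error
  double F = Z * ks.P * Z + H;             // prediction error variance
  double K = T * ks.P * Z / F;             // Kalman gain
  ks.a = T * ks.a + K * v;                 // a[t+1] = T a[t] + K v
  ks.P = T * ks.P * T - K * F * K + RQR;   // P[t+1] = T P T' - K F K' + RQR'
  return v;
}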
void SSLM::observe_data_given_state(int t) {
  if (!is_missing_observation(t)) {
    // Record the state contribution as an offset on the data point, so the
    // logistic regression observation model treats it as a known term in the
    // linear predictor.
    dat()[t]->set_offset(observation_matrix(t).dot(state(t)));
    signal_complete_data_change(t);
  }
}