Example #1
File: StateModel.cpp  Project: Hkey1/boom
 void StateModel::simulate_initial_state(VectorView eta)const{
   if(eta.size() != state_dimension()){
     std::ostringstream err;
     err << "output vector 'eta' has length " << eta.size()
         << " in StateModel::simulate_initial_state.  Expected length "
         << state_dimension();
     report_error(err.str());
   }
   eta = rmvn(initial_state_mean(), initial_state_variance());
 }
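For orientation, a minimal usage sketch (not from the project sources): the caller allocates a buffer of length state_dimension() and passes a VectorView over it. SomeStateModel is a hypothetical stand-in for any concrete StateModel subclass.

 // Hedged usage sketch; SomeStateModel is a hypothetical concrete subclass.
 SomeStateModel model;
 Vector eta(model.state_dimension());   // output buffer of the right length
 model.simulate_initial_state(VectorView(eta));
 // eta now holds one draw from N(initial_state_mean(), initial_state_variance()).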
Example #2
File: ArModel.cpp  Project: comenerv/Boom
 Vec ArModel::simulate(int n) const {
   int p = number_of_lags();
   Vec acf = autocovariance(p);
   Spd Sigma(p);
   Sigma.diag() = acf[0];
   for(int i = 1; i < p; ++i) {
     Sigma.subdiag(i) = acf[i];
     Sigma.superdiag(i) = acf[i];
   }
   Vec zero(p, 0.0);
   Vec y0 = rmvn(zero, Sigma);
   return simulate(n, y0);
 }
Example #3
File: ArModel.cpp  Project: cran/Boom
 Vector ArModel::simulate(int n, RNG &rng) const {
   int p = number_of_lags();
   Vector acf = autocovariance(p);
   SpdMatrix Sigma(p);
   Sigma.diag() = acf[0];
   for (int i = 1; i < p; ++i) {
     Sigma.subdiag(i) = acf[i];
     Sigma.superdiag(i) = acf[i];
   }
   Vector zero(p, 0.0);
   Vector y0 = rmvn(zero, Sigma);
   return simulate(n, y0, rng);
 }
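Examples #2 and #3 are an older and a newer revision of the same function (Vec/Spd vs. Vector/SpdMatrix, plus an explicit RNG argument). Both build the Toeplitz covariance Sigma whose band i holds the lag-i autocovariance acf[i], draw the first p values y0 from the stationary distribution N(0, Sigma), then hand off to simulate(n, y0, ...) to run the AR recursion forward. A hedged usage sketch follows; the constructor, coefficient and variance setters, and the RNG seeding interface are assumptions about the ArModel API, not code from these files.

 // Hedged sketch: simulate 100 observations from an AR(2) model.
 // Constructor and setter names below are assumptions.
 ArModel model(2);                    // hypothetical: AR model with p = 2 lags
 model.set_phi(Vector{0.6, -0.2});    // assumed setter for AR coefficients
 model.set_sigsq(1.0);                // assumed setter for innovation variance
 RNG rng(8675309);                    // seeding interface assumed
 Vector y = model.simulate(100, rng);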
Example #4
  Vector StateSpaceLogitModel::one_step_holdout_prediction_errors(
      RNG &rng,
      BinomialLogitDataImputer &data_imputer,
      const Vector &successes,
      const Vector &trials,
      const Matrix &predictors,
      const Vector &final_state) {
    if (nrow(predictors) != successes.size()
        || trials.size() != successes.size()) {
      report_error("Size mismatch in arguments provided to "
                   "one_step_holdout_prediction_errors.");
    }
    Vector ans(successes.size());
    int t0 = dat().size();
    ScalarKalmanStorage ks(state_dimension());
    ks.a = *state_transition_matrix(t0 - 1) * final_state;
    ks.P = SpdMatrix(state_variance_matrix(t0 - 1)->dense());

    // This function differs from the Gaussian case because the
    // response is on the binomial scale, and the state model is on
    // the logit scale.  Because of the nonlinearity, we need to
    // incorporate the uncertainty about the forecast in the
    // prediction for the observation.  We do this by imputing the
    // latent logit and its mixture indicator for each observation.
    // The strategy is (for each observation)
    //   1) simulate the next state,
    //   2) impute the latent logit w_t given the state,
    //   3) Kalman-update the state given w_t.
    for (int t = 0; t < ans.size(); ++t) {
      bool missing = false;
      Vector state = rmvn(ks.a, ks.P);

      double state_contribution = observation_matrix(t + t0).dot(state);
      double regression_contribution =
          observation_model_->predict(predictors.row(t));
      double mu = state_contribution + regression_contribution;
      double prediction = trials[t] * plogis(mu);
      ans[t] = successes[t] - prediction;

      // ans[t] is a random draw of the one step ahead prediction
      // error at time t0+t given observed data to time t0+t-1.  We
      // now proceed with the steps needed to update the Kalman filter
      // so we can compute ans[t+1].

      double precision_weighted_sum, total_precision;
      std::tie(precision_weighted_sum, total_precision) = data_imputer.impute(
          rng,
          trials[t],
          successes[t],
          mu);
      double latent_observation = precision_weighted_sum / total_precision;
      double latent_variance = 1.0 / total_precision;

      // The latent state was drawn from its predictive distribution
      // given Y[t0 + t -1] and used to impute the latent data for
      // y[t0+t].  That latent data is now used to update the Kalman
      // filter for the next time period.  It is important that we
      // discard the imputed state at this point.
      sparse_scalar_kalman_update(latent_observation - regression_contribution,
                                  ks.a,
                                  ks.P,
                                  ks.K,
                                  ks.F,
                                  ks.v,
                                  missing,
                                  observation_matrix(t + t0),
                                  latent_variance,
                                  *state_transition_matrix(t + t0),
                                  *state_variance_matrix(t + t0));
    }
    return ans;
  }
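And a hedged sketch of how the holdout errors might be computed once a model has been fit. The imputer class, the RNG seeding interface, the state accessor, and the holdout_* variables are assumptions about the surrounding BOOM API, not taken from the example above.

  // Hedged sketch, not from the project sources.  Assumes `model` is a
  // fitted StateSpaceLogitModel and the holdout series (successes,
  // trials, predictors) continues directly after the training data.
  RNG rng(12345);                        // seeding interface assumed
  BinomialLogitCltDataImputer imputer;   // one BOOM imputer; defaults assumed
  // Final state vector: last column of the state matrix (accessor assumed).
  Vector final_state = model.state().last_col();
  Vector errors = model.one_step_holdout_prediction_errors(
      rng, imputer, holdout_successes, holdout_trials,
      holdout_predictors, final_state);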