Example #1
0
  // Compare a vector of predictions to a vector of true values by
  // regressing pred on truth:  pred = b0 + b1 * truth + error.  Under
  // the null hypothesis that the predictions are unbiased, b0 = 0 and
  // b1 = 1, so the "null model" residual is simply pred - truth.
  //
  // Args:
  //   truth: The vector of observed true values.
  //   pred:  The vector of predicted values, same length as truth.
  //
  // Returns:
  //   A ComparePredictionsOutput structure holding the fitted
  //   intercept and slope (with standard errors), the error and total
  //   sums of squares, and the F statistic and p-value comparing the
  //   fitted model to the null model.
  ComparePredictionsOutput compare_predictions(const ConstVectorView &truth,
                                               const ConstVectorView &pred) {
    Matrix X(truth.size(), 2);
    X.col(0) = 1.0;  // intercept column
    X.col(1) = truth;
    RegressionModel model(X, pred);
    Vector null_residual = pred - truth;
    Vector beta = model.Beta();
    Vector alternative_residual = pred - X * beta;
    // Under the null hypothesis, residual and alternative_residual
    // will have the same distribution, but the alternative_residual
    // will have used two degrees of freedom, while the null residual
    // will have used zero.
    int n = truth.size();
    double SSE = alternative_residual.normsq();
    double SST = null_residual.normsq();
    double SSR = SST - SSE;
    // Fstat is the reciprocal of the conventional test statistic
    // F = (SSR / 2) / (SSE / (n - 2)), so under the null it follows an
    // F(n - 2, 2) distribution, and evidence AGAINST the null (a large
    // conventional F) corresponds to SMALL values of Fstat.
    double Fstat = (SSE / (n - 2)) / (SSR / 2.0);
    // BUG FIX: because Fstat is the reciprocal statistic, the p-value
    // is the LOWER tail of the F(n - 2, 2) distribution (equivalent to
    // the upper tail of the conventional F(2, n - 2) statistic).  The
    // previous code took the upper tail, which reported 1 - p.
    double p_value = pf(Fstat, n - 2, 2, true);
    ComparePredictionsOutput result;

    SpdMatrix xtx(2, 0.0);
    xtx.add_inner(X);
    // Standard errors come from the diagonal of sigsq * (X'X)^{-1}.
    Vector beta_standard_errors = sqrt(model.sigsq() * (xtx.inv().diag()));

    result.intercept = beta[0];
    result.intercept_se = beta_standard_errors[0];
    result.slope = beta[1];
    result.slope_se = beta_standard_errors[1];
    result.SSE = SSE;
    result.SST = SST;
    result.Fstat = Fstat;
    result.p_value = p_value;
    return result;
  }
Example #2
0
File: MvtModel.cpp  Project: cran/Boom
  // Evaluate the multivariate T log likelihood at the parameters packed
  // into a single vector.
  //
  // Args:
  //   mu_siginv_triangle_nu: The mean vector mu (first dim() elements),
  //     followed by the packed triangle of the precision matrix Siginv
  //     (read back by SpdMatrix::unvectorize with minimal == true),
  //     followed by the tail thickness parameter nu as the final
  //     element.
  //
  // Returns:
  //   sum_i log f(y_i | mu, Siginv, nu), where
  //     log f(y) = lgamma((nu + d) / 2) - lgamma(nu / 2)
  //                - (d / 2) * log(nu * pi) + (1 / 2) * log|Siginv|
  //                - ((nu + d) / 2) * log(1 + delta / nu)
  //   and delta = (y - mu)' Siginv (y - mu).
  double MVT::loglike(const Vector &mu_siginv_triangle_nu) const {
    const DatasetType &dat(this->dat());
    const ConstVectorView mu(mu_siginv_triangle_nu, 0, dim());
    SpdMatrix siginv(dim());
    Vector::const_iterator it = mu_siginv_triangle_nu.cbegin() + dim();
    siginv.unvectorize(it, true);
    double ldsi = siginv.logdet();
    double nu = mu_siginv_triangle_nu.back();
    double lognu = log(nu);

    const double logpi = 1.1447298858494;  // log(pi)
    uint n = dat.size();
    uint d = mu.size();
    double half_npd = .5 * (nu + d);

    // Data-independent part of the log density, multiplied by n below.
    // The half_npd * lognu term converts -half_npd * log(nu + delta)
    // in the loop into -half_npd * log(1 + delta / nu).
    double ans = lgamma(half_npd) - lgamma(nu / 2) - .5 * d * (lognu + logpi);
    ans += .5 * ldsi + half_npd * lognu;
    ans *= n;

    for (uint i = 0; i < n; ++i) {
      double delta = siginv.Mdist(mu, dat[i]->value());
      // BUG FIX: the argument must be (nu + delta), not
      // (nu + delta / nu).  The stray division by nu corrupted the
      // kernel of the T density.
      ans -= half_npd * log(nu + delta);
    }

    return ans;
  }
Example #3
0
 // Record the one-step change in each dynamic regression coefficient,
 // feeding it to that coefficient's innovation variance model.
 void DynamicRegressionStateModel::observe_state(
     const ConstVectorView then, const ConstVectorView now, int time_now){
   check_size(then.size());
   check_size(now.size());
   int dim = then.size();
   for (int j = 0; j < dim; ++j) {
     coefficient_transition_model_[j]->suf()->update_raw(now[j] - then[j]);
   }
 }
Example #4
0
File: EigenMap.hpp  Project: cran/Boom
 // Adapt a ConstVectorView (which may have a non-unit stride) into a
 // read-only Eigen vector map, without copying any data.
 inline ::Eigen::Map<const ::Eigen::VectorXd, ::Eigen::Unaligned,
                     ::Eigen::InnerStride<::Eigen::Dynamic>>
 EigenMap(const ConstVectorView &view) {
   using Stride = ::Eigen::InnerStride<::Eigen::Dynamic>;
   using ConstMap =
       ::Eigen::Map<const ::Eigen::VectorXd, ::Eigen::Unaligned, Stride>;
   return ConstMap(view.data(), view.size(), Stride(view.stride()));
 }
Example #5
0
File: ArModel.cpp  Project: comenerv/Boom
 // Compute the autocovariance function of the AR process through its
 // moving average (filter coefficient) representation:
 //   gamma(lag) = sigsq * sum_j psi[j] * psi[j + lag].
 Vector ArModel::autocovariance(int number_of_lags)const{
   set_filter_coefficients();
   Vec acf(number_of_lags + 1);
   for (int lag = 0; lag <= number_of_lags; ++lag) {
     int overlap = filter_coefficients_.size() - lag;
     ConstVectorView head(filter_coefficients_, 0, overlap);
     ConstVectorView shifted(filter_coefficients_, lag, overlap);
     acf[lag] = head.dot(shifted);
   }
   return acf * sigsq();
 }
Example #6
0
 // Return the log of the prior density evaluated at the given
 // parameter vector, which must contain exactly one element: the
 // variance.
 double ZGS::log_prior_density(const ConstVectorView &parameters) const {
   if (parameters.size() != 1) {
     report_error(
         "Wrong size parameters passed to "
         "ZeroMeanGaussianConjSampler::log_prior_density.");
   }
   double sigsq = parameters[0];
   return log_prior(sigsq, nullptr, nullptr);
 }
Example #7
0
 // Add the derivative of the log prior (with respect to the single
 // variance parameter) to gradient[0], and return the log prior
 // density.  Both arguments must have exactly one element.
 double ZGS::increment_log_prior_gradient(const ConstVectorView &parameters,
                                          VectorView gradient) const {
   bool wrong_size = (parameters.size() != 1) || (gradient.size() != 1);
   if (wrong_size) {
     report_error(
         "Wrong size arguments passed to "
         "ZeroMeanGaussianConjSampler::increment_log_prior_gradient.");
   }
   return log_prior(parameters[0], &gradient[0], nullptr);
 }
// Verify that the state vector v has the dimension (3) expected by
// LocalLinearTrendMeanRevertingSlopeStateModel; report an error
// (via report_error, which does not return normally) otherwise.
void LMSM::check_dim(const ConstVectorView &v)const {
    if(v.size() != 3) {
        ostringstream err;
        // BUG FIX: corrected the misspelling "dimesion" and the stray
        // "= :" in the error message.
        err << "improper dimension of ConstVectorView v = "
            << v << endl
            << "in LocalLinearTrendMeanRevertingSlopeStateModel.  "
            << "Should be of dimension 3"
            << endl;
        report_error(err.str());
    }
}
// Multiply the transpose of the 3x3 state transition matrix by rhs,
// writing the product into lhs.  The matrix depends on the AR(1)
// coefficient phi of the mean reverting slope model.
void LMAT::Tmult(VectorView lhs, const ConstVectorView &rhs)const {
    if (lhs.size() != 3) {
        report_error("lhs is the wrong size in LMAT::Tmult");
    }
    if (rhs.size() != 3) {
        report_error("rhs is the wrong size in LMAT::Tmult");
    }
    const double phi = phi_->value();
    lhs[0] = rhs[0];
    lhs[1] = rhs[0] + phi * rhs[1];
    lhs[2] = (1 - phi) * rhs[1] + rhs[2];
}
Example #10
0
 // Add the expected derivative of the complete data log likelihood
 // with respect to each coefficient's innovation variance to
 // 'gradient'.
 //
 // Args:
 //   gradient: Vector of partial derivatives to increment.  Must have
 //     length xdim_.
 //   t: The time index (unused here; part of the virtual interface).
 //   state_error_mean: Posterior mean of the state innovations at time
 //     t.  Must have length xdim_.
 //   state_error_variance: Posterior variance of the state innovations
 //     at time t.  Must be xdim_ by xdim_.
 void DRSM::increment_expected_gradient(
     VectorView gradient, int t, const ConstVectorView &state_error_mean,
     const ConstSubMatrix &state_error_variance) {
   if (gradient.size() != xdim_ || state_error_mean.size() != xdim_ ||
       state_error_variance.nrow() != xdim_ ||
       state_error_variance.ncol() != xdim_) {
     report_error(
         "Wrong size arguments passed to "
         "DynamicRegressionStateModel::increment_expected_gradient.");
   }
   for (int i = 0; i < xdim_; ++i) {
     double mean = state_error_mean[i];
     double var = state_error_variance(i, i);
     double sigsq = DynamicRegressionStateModel::sigsq(i);
     // d/d(sigsq) E[log p] = .5 * (E[delta^2] / sigsq^2 - 1 / sigsq),
     // where E[delta^2] = var + mean^2.  (A stray empty statement ';'
     // was removed here.)
     double tmp = (var + mean * mean) / (sigsq * sigsq) - 1.0 / sigsq;
     gradient[i] += .5 * tmp;
   }
 }
Example #11
0
File: SpdMatrix.cpp  Project: Hkey1/boom
 // Return the outer product v * v' as a symmetric (positive
 // semidefinite) matrix.
 SpdMatrix outer(const ConstVectorView &v){
   SpdMatrix result(v.size(), 0.0);
   result.add_outer(v);
   return result;
 }