Example #1
  ComparePredictionsOutput compare_predictions(const ConstVectorView &truth,
                                               const ConstVectorView &pred) {
    Matrix X(truth.size(), 2);
    X.col(0) = 1.0;
    X.col(1) = truth;
    RegressionModel model(X, pred);
    Vector null_residual = pred - truth;
    Vector beta = model.Beta();
    Vector alternative_residual = pred - X * beta;
    // Under the null hypothesis, null_residual and alternative_residual
    // have the same distribution, but alternative_residual has used two
    // degrees of freedom (intercept and slope), while null_residual has
    // used zero.
    int n = truth.size();
    double SSE = alternative_residual.normsq();
    double SST = null_residual.normsq();
    double SSR = SST - SSE;
    double Fstat = (SSE / (n - 2)) / (SSR / 2.0);
    double p_value = pf(Fstat, n - 2, 2, false);
    ComparePredictionsOutput result;

    SpdMatrix xtx(2, 0.0);
    xtx.add_inner(X);
    Vector beta_standard_errors = sqrt(model.sigsq() * (xtx.inv().diag()));

    result.intercept = beta[0];
    result.intercept_se = beta_standard_errors[0];
    result.slope = beta[1];
    result.slope_se = beta_standard_errors[1];
    result.SSE = SSE;
    result.SST = SST;
    result.Fstat = Fstat;
    result.p_value = p_value;
    return result;
  }
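A minimal call-site sketch for the function above. This is illustrative only: the data values are made up, and it assumes the BOOM-style Vector supports brace initialization and that ConstVectorView can be constructed directly from a Vector (plus the usual <iostream> include).

  // Illustrative only: compare noisy predictions against known truth.
  Vector truth = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
  Vector pred  = {1.1, 1.9, 3.2, 3.9, 5.1, 5.8};
  ComparePredictionsOutput cmp =
      compare_predictions(ConstVectorView(truth), ConstVectorView(pred));
  std::cout << "intercept = " << cmp.intercept
            << " (se " << cmp.intercept_se << ")\n"
            << "slope     = " << cmp.slope << " (se " << cmp.slope_se << ")\n"
            << "F = " << cmp.Fstat << ", p-value = " << cmp.p_value
            << std::endl;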
Example #2
  void ArPosteriorSampler::draw_phi_univariate() {
    int p = model_->phi().size();
    Vec phi = model_->phi();
    if (!model_->check_stationary(phi)) {
      report_error("ArPosteriorSampler::draw_phi_univariate was called with an "
                   "illegal initial value of phi.  That should never happen.");
    }
    const Spd &xtx(model_->suf()->xtx());
    const Vec &xty(model_->suf()->xty());

    for (int i = 0; i < p; ++i) {
      double initial_phi = phi[i];

      double lo = -1;
      double hi = 1;

      // The sum of squares is (y - X*phi)' (y - X*phi)
      //   = phi' xtx phi - 2 * phi' xty + yty.
      //
      // phi' xtx phi = sum_i sum_j phi[i] phi[j] xtx(i, j)
      //              = phi[i]^2 * xtx(i, i)
      //                + 2 * phi[i] * sum_{j != i} xtx(i, j) * phi[j]
      //                + terms not involving phi[i].
      //
      // Combining with the -2 * phi[i] * xty[i] term and completing the
      // square, the conditional mean of phi[i] is
      //   (xty[i] - sum_{j != i} xtx(i, j) * phi[j]) / xtx(i, i).
      double ivar = xtx(i, i);
      double mu = (xty[i] - (phi.dot(xtx.col(i)) - phi[i] * xtx(i, i))) / ivar;
      bool ok = false;
      while (!ok) {
        double candidate = rtrun_norm_2_mt(rng(), mu, sqrt(1.0/ivar), lo, hi);
        phi[i] = candidate;
        if (ArModel::check_stationary(phi)) {
          ok = true;
        } else {
          if (candidate > initial_phi) hi = candidate;
          else lo = candidate;
        }
      }
    }
    model_->set_phi(phi);
  }
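The accept/shrink loop above is a reusable pattern: draw from a truncated proposal, and when the draw violates the constraint, pull the nearer bound in to the rejected value so the bracket contracts around the last valid point. A self-contained sketch of just that pattern, with a hypothetical helper name and a crude rejection step standing in for rtrun_norm_2_mt:

#include <functional>
#include <random>

// Hypothetical helper (not BOOM API): draw from a normal(mu, sd) truncated to
// (lo, hi), shrinking the bracket toward `current` until ok(candidate) holds.
double draw_with_shrinking_bracket(std::mt19937 &rng, double mu, double sd,
                                   double current, double lo, double hi,
                                   const std::function<bool(double)> &ok) {
  std::normal_distribution<double> norm(mu, sd);
  for (;;) {
    double candidate;
    do {  // Crude rejection sampler for the truncated normal on (lo, hi).
      candidate = norm(rng);
    } while (candidate <= lo || candidate >= hi);
    if (ok(candidate)) return candidate;
    // Shrink the side of the bracket that the rejected draw fell on.
    if (candidate > current) {
      hi = candidate;
    } else {
      lo = candidate;
    }
  }
}

In draw_phi_univariate the constraint is ArModel::check_stationary on the full phi vector, and the proposal is centered at the conditional mean derived in the comments above.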
Example #3
 Matrix MvRegSuf::conditional_beta_hat(const SelectorMatrix &included) const {
   Matrix ans(xdim(), ydim());
   std::map<Selector, Cholesky> chol_map;
   for (int i = 0; i < ydim(); ++i) {
     const Selector &inc(included.col(i));
     auto it = chol_map.find(inc);
     if (it == chol_map.end()) {
       // First time this inclusion pattern is seen: factor the selected
       // block of xtx and cache it for reuse by later columns.
       chol_map[inc] = Cholesky(inc.select(xtx()));
       it = chol_map.find(inc);
     }
     ans.col(i) = inc.expand(it->second.solve(inc.select(xty_.col(i))));
   }
   return ans;
 }
Example #4
int main() {
    // Benchmark: accumulate X'X and X'y from randomly generated points, then
    // time a direct matrix inverse and a Cholesky factorization of X'X.
    auto b = genRandomVector(kDim);

    Matrix xtx(kDim);
    Vector xty(kDim);

    {
        Timer tGen("Generate");
        constexpr u32 kVectorPoints = 4000 * kDim;
        for (u32 iVectorPoint = 0; iVectorPoint < kVectorPoints; ++iVectorPoint) {
            auto p = getRandomVectorPoint(b);
            for (size_t i = 0; i < p.dimension(); ++i) {
                for (size_t j = 0; j < p.dimension(); ++j) {
                    xtx.data_[i][j] += p.a_[i] * p.a_[j];
                }
            }
            for (size_t i = 0; i < p.dimension(); ++i) {
                xty[i] += p.a_[i] * p.b_;
            }
        }
    }

    {
        Timer tSolve("Invert");
        auto inv = xtx.invert();
        tSolve.finish();
        Vector bPrime = inv * xty;

        LOG(INFO) << OUT(b) << OUT(length(b));
        LOG(INFO) << OUT(bPrime) << OUT(length(bPrime));
        auto err = b - bPrime;
        LOG(INFO) << OUT(err) << OUT(length(err));
    }

    {
        Timer tSolve("Cholesky");
        auto chol = xtx.cholesky();
        tSolve.finish();
        Matrix xtx2 = chol*chol.transpose();
        LOG(INFO) << OUT(xtx.norm2()) << OUT(xtx2.norm2()) << OUT((xtx - xtx2).norm2());
    }

    return 0;
}
Example #5
  double RM::Loglike(const Vector &sigsq_beta,
                     Vector &g, Matrix &h, uint nd) const {
    const double log2pi = 1.83787706640935;
    const double sigsq = sigsq_beta[0];
    const Vector b(ConstVectorView(sigsq_beta, 1));
    double n = suf()->n();
    if (b.size() == 0) return empty_loglike(g, h, nd);

    double SSE = yty() - 2 * b.dot(xty()) + xtx().Mdist(b);
    double ans = -.5 * (n * log2pi + n * log(sigsq) + SSE / sigsq);

    if (nd > 0) {  // sigsq derivatives come first in the CP2 vectorization
      SpdMatrix xtx = this->xtx();
      Vector gbeta = (xty() - xtx * b) / sigsq;
      double sig4 = sigsq * sigsq;
      double gsigsq = -n / (2 * sigsq) + SSE / (2 * sig4);
      g = concat(gsigsq, gbeta);
      if (nd > 1) {
        double h11 = .5 * n / sig4 - SSE / (sig4 * sigsq);
        h = unpartition(h11, (-1 / sigsq) * gbeta, (-1 / sigsq) * xtx);
      }
    }
    return ans;
  }
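For reference, the gradient and Hessian blocks assembled above are the standard derivatives of the Gaussian regression log likelihood in the (sigsq, beta) parameterization; nothing here goes beyond what the code computes:

  \ell(\sigma^2, \beta) = -\tfrac{1}{2}\left( n \log 2\pi + n \log \sigma^2
      + \frac{\mathrm{SSE}(\beta)}{\sigma^2} \right),
  \qquad
  \mathrm{SSE}(\beta) = y^\top y - 2\,\beta^\top X^\top y + \beta^\top X^\top X \beta,

  \frac{\partial \ell}{\partial \beta} = \frac{X^\top y - X^\top X \beta}{\sigma^2},
  \qquad
  \frac{\partial \ell}{\partial \sigma^2} = -\frac{n}{2\sigma^2}
      + \frac{\mathrm{SSE}(\beta)}{2\sigma^4},

  \frac{\partial^2 \ell}{\partial (\sigma^2)^2} = \frac{n}{2\sigma^4}
      - \frac{\mathrm{SSE}(\beta)}{\sigma^6},
  \qquad
  \frac{\partial^2 \ell}{\partial \sigma^2 \, \partial \beta}
      = -\frac{X^\top y - X^\top X \beta}{\sigma^4},
  \qquad
  \frac{\partial^2 \ell}{\partial \beta \, \partial \beta^\top}
      = -\frac{X^\top X}{\sigma^2}.

These correspond to h11, the (-1/sigsq) * gbeta cross block, and the (-1/sigsq) * xtx block passed to unpartition.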
Example #6
DoubleVector linearRegression(const VectorPoints& points, double rigid) {
    assert(!points.empty());

    Matrix xtx(points[0].dimension());
    Vector xty(points[0].dimension());

    for (u32 iVectorPoint = 0; iVectorPoint < points.size(); ++iVectorPoint) {
        const auto& p = points[iVectorPoint];
        assert(p.dimension() == xty.size());
        for (size_t i = 0; i < p.dimension(); ++i) {
            for (size_t j = 0; j < p.dimension(); ++j) {
                xtx.data_[i][j] += p.a_[i] * p.a_[j];
            }
        }
        for (size_t i = 0; i < p.dimension(); ++i) {
            xty[i] += p.a_[i] * p.b_;
        }
    }
    // Ridge term for regularization / numerical stability.  Note it is added
    // before the division by points.size() below, so the effective ridge on
    // the averaged system is rigid / points.size().
    for (size_t i = 0; i < xtx.dimension(); ++i) {
        xtx.data_[i][i] += rigid;
    }
    xtx /= points.size();
    for (auto& x : xty) {
        x /= points.size();
    }

    auto b = xtx.invert() * xty;

    // cout << OUT((xtx.invert() * xtx - Matrix::one(xtx.dimension())).norm2()) << endl;
    /*
    for (size_t i = 0; i < points.size(); ++i) {
        const auto& p = points[i];
        cout << OUT(p.b_) << OUT(dot(p.a_, b)) << OUT(p.a_[p.a_.size() - 2]) << endl;
    }
    */

    return b;
}
 SpdMatrix MvRegSuf::SSE(const Matrix &B) const {
   SpdMatrix ans = yty();
   ans.add_inner2(B, xty(), -1);
   ans += sandwich(B.transpose(), xtx());
   return ans;
 }
 SpdMatrix RM::xtx() const { return xtx(coef().inc()); }
 //============================================================
 SpdMatrix RegSuf::centered_xtx() const {
   SpdMatrix ans = xtx();
   ans.add_outer(xbar(), -n());
   return ans;
 }
 double NeRegSuf::SSE() const {
   SpdMatrix ivar = xtx().inv();
   return yty() - ivar.Mdist(xty());
 }
 ostream & QrRegSuf::print(ostream &out)const{
   return out << "sumsqy = " << yty() << endl
              << "xty_ = " << xty() << endl
              << "xtx  = " << endl << xtx();
 }
 SpdMatrix QrRegSuf::xtx(const Selector &inc)const{
   //    if(!current) refresh_qr();
   return inc.select(xtx());
 }
 //======================================================================
 ostream & RegSuf::print(ostream &out)const{
   out << "sample size: " << n() << endl
       << "xty: " << xty() << endl
       << "xtx: " << endl << xtx();
   return out;
 }