SpdMatrix WMS::center_sumsq(const Vector &mu)const{
  // Weighted sum of squares recentered around mu:
  //   sum_i w_i (y_i - mu)(y_i - mu)^T
  //     = sumsq() + sumw() * mu mu^T - (mu sum_^T + sum_ mu^T)
  SpdMatrix centered = sumsq();              // sum of w * y y^T
  centered.add_outer(mu, sumw());            // + sumw * mu mu^T
  centered -= as_symmetric(mu.outer(sum_, 2));  // - (mu sum^T + sum mu^T)
  return centered;
}
// Log of the (un-normalized) marginal posterior probability of the
// inclusion pattern g.  Combines the variable-selection prior, the
// conditional Gaussian prior on the included coefficients, and the
// Gaussian likelihood contribution, with constant terms omitted.
double BLSSS::log_model_prob(const Selector &g)const{
  // borrowed from MLVS.cpp
  double num = vpri_->logp(g);
  if(num==BOOM::negative_infinity() || g.nvars() == 0) {
    // If num == -infinity then it is in a zero support point in the
    // prior.  If g.nvars()==0 then all coefficients are zero
    // because of the point mass.  The only entries remaining in the
    // likelihood are sums of squares of y[i] that are independent
    // of g.  They need to be omitted here because they are omitted
    // in the non-empty case below.
    return num;
  }
  // Prior precision of the included coefficients.
  SpdMatrix ivar = g.select(pri_->siginv());
  num += .5*ivar.logdet();
  if(num == BOOM::negative_infinity()) return num;
  Vector mu = g.select(pri_->mu());
  Vector ivar_mu = ivar * mu;
  num -= .5*mu.dot(ivar_mu);
  bool ok=true;
  // Posterior precision: prior precision + X'X restricted to g.
  ivar += g.select(suf().xtx());
  Matrix L = ivar.chol(ok);
  // A failed Cholesky means the posterior precision is not positive
  // definite; treat the model as having zero probability.
  if(!ok) return BOOM::negative_infinity();
  double denom = sum(log(L.diag()));  // = .5 log |ivar|
  Vector S = g.select(suf().xty()) + ivar_mu;
  // Solve L S = (X'y + prior_precision * prior_mean) in place, so that
  // S.normsq() below computes the quadratic form in the posterior mean.
  Lsolve_inplace(L,S);
  denom-= .5*S.normsq();  // S.normsq = beta_tilde ^T V_tilde beta_tilde
  return num-denom;
}
void DM::sandwich_inplace(SpdMatrix &m) const {
  // Replace m with D * m * D, where D is this diagonal matrix.  Scaling
  // row i and column i by d_i multiplies the (i, j) element by d_i * d_j.
  assert((nrow() == m.nrow()) && (m.ncol() == ncol()));
  const int dim = nrow();
  for (int i = 0; i < dim; ++i) {
    const double scale = diagonal_elements_[i];
    m.row(i) *= scale;  // left multiplication by D
    m.col(i) *= scale;  // right multiplication by D
  }
}
//--------------------------------------------------------------------------- SpdMatrix Marginal::forecast_precision() const { const Selector &observed(model_->observed_status(time_index())); DiagonalMatrix observation_precision = model_->observation_variance(time_index()).inv(); const SparseKalmanMatrix *observation_coefficients( model_->observation_coefficients(time_index(), observed)); SpdMatrix variance; if (previous()) { variance = previous()->state_variance(); } else { variance = model_->initial_state_variance(); } // 'inner' is I + P * Z' Hinv Z Matrix inner = variance * observation_coefficients->inner( observation_precision.diag()); inner.diag() += 1.0; SpdMatrix outer = inner.solve(variance); SpdMatrix ans = observation_precision.sandwich( observation_coefficients->sandwich(outer)); ans *= -1; ans.diag() += observation_precision.diag(); return ans; }
int main(int argc, char **argv) { SpdMatrix *pMat; /* * 1st test */ pMat = new SpdMatrix(4, 0); pMat->setEntry(0, 0, 9); pMat->setEntry(0, 1, 3); pMat->setEntry(0, 2, -6); pMat->setEntry(0, 3, 12); pMat->setEntry(1, 1, 26); pMat->setEntry(1, 2, -7); pMat->setEntry(1, 3, -11); pMat->setEntry(2, 2, 9); pMat->setEntry(2, 3, 7); pMat->setEntry(3, 3, 65); cout << "Original matrix:" << endl; pMat->print(); SquareMatrix l(pMat->chol()); // L matrix should be: // 3 0 0 0 // 1 5 0 0 // -2 -1 2 0 // 4 -3 6 2 cout << "L matrix:" << endl; l.print(); delete pMat; }
//--------------------------------------------------------------------------- SpdMatrix Marginal::direct_forecast_precision() const { SpdMatrix variance; if (previous()) { variance = previous()->state_variance(); } else { variance = model_->initial_state_variance(); } const Selector &observed(model_->observed_status(time_index())); SpdMatrix ans = model_->observation_coefficients( time_index(), observed)->sandwich(variance); ans.diag() += model_->observation_variance(time_index()).diag(); return ans.inv(); }
// Computes the sandwich product A * V * A^T, where V is symmetric.
SpdMatrix sandwich(const Matrix &A, const SpdMatrix &V){ // AVA^T
  // tmp = A * V, computed with the symmetric BLAS multiply.  With
  // side = CblasRight the symmetric operand (V) sits on the right:
  //   tmp := 1.0 * A * V + 0.0 * tmp,
  // reading only V's upper triangle (CblasUpper).
  Matrix tmp(A.nrow(), V.ncol());
  cblas_dsymm(CblasColMajor, CblasRight, CblasUpper,
              tmp.nrow(), tmp.ncol(),
              1.0, V.data(), V.nrow(),
              A.data(), A.nrow(),
              0.0, tmp.data(), tmp.nrow());
  // (A V) * A^T completes the sandwich.
  return matmultT(tmp, A);
}
//====================================================================== void ArStateModel::set_initial_state_variance(const SpdMatrix &Sigma){ if(Sigma.nrow() != state_dimension()){ report_error("attempt to set Sigma to the wrong size in " "ArStateModel::set_initial_state_mean"); } initial_state_variance_ = Sigma; }
// Density of the zero-mean multivariate normal distribution.
//   log p(y) = 0.5 * (log|Siginv| - y' Siginv y - d * log(2 pi))
// Args:
//   y:        Point of evaluation.
//   Siginv:   Precision (inverse variance) matrix.
//   ldsi:     Pre-computed log determinant of Siginv.
//   logscale: If true return the log density, otherwise the density.
double dmvn_zero_mean(const Vector &y, const SpdMatrix &Siginv,
                      double ldsi, bool logscale) {
  const double log2pi = 1.83787706641;  // log(2 * pi)
  const double dim = y.size();
  const double log_density =
      0.5 * (ldsi - Siginv.Mdist(y) - dim * log2pi);
  if (logscale) {
    return log_density;
  }
  return std::exp(log_density);
}
// The likelihood is \prod root(2pi)^-d |siginv|^{n/2} exp{-1/2 * trace(qform)} double MvReg::log_likelihood_ivar(const Matrix &Beta, const SpdMatrix &Siginv) const { double qform = trace(suf()->SSE(Beta) * Siginv); double n = suf()->n(); double normalizing_constant = -.5 * (n * ydim()) * Constants::log_2pi; return normalizing_constant + .5 * n * Siginv.logdet() - .5 * qform; }
//====================================================================== double dmvt(const Vector &x, const Vector &mu, const SpdMatrix &Siginv, double nu, double ldsi, bool logscale){ long dim = mu.size(); double nc = lgamma( (nu + dim)/2.0 ) + .5 * ldsi - lgamma(nu/2.0) - (.5*dim) * (log(nu) + Constants::log_pi); double delta = Siginv.Mdist(x, mu); double ans = nc - .5*(nu + dim)*(::log1p(delta/nu)); return logscale ? ans : exp(ans); }
// Draws the included regression coefficients from their conditional
// posterior given the inclusion indicators.
void BLSSS::draw_beta() {
  Selector g = m_->coef().inc();
  if(g.nvars() == 0) {
    // Nothing is included, so zero out all coefficients and return.
    m_->drop_all();
    return;
  }
  // Posterior precision = prior precision + X'X, both restricted to the
  // included coefficients.
  SpdMatrix ivar = g.select(pri_->siginv());
  Vector ivar_mu = ivar * g.select(pri_->mu());
  ivar += g.select(suf().xtx());
  ivar_mu += g.select(suf().xty());
  // Posterior mean solves ivar * b = ivar_mu; then draw from
  // N(b, ivar^{-1}).
  Vector b = ivar.solve(ivar_mu);
  b = rmvn_ivar_mt(rng(), b, ivar);
  // If model selection is turned off and some elements of beta
  // happen to be zero (because, e.g., of a failed MH step) we don't
  // want the dimension of beta to change.
  m_->set_included_coefficients(b, g);
}
// Draws a multivariate normal deviate with mean mu and precision
// (inverse variance) matrix ivar.
Vector rmvn_ivar_mt(RNG &rng, const Vector &mu, const SpdMatrix &ivar) {
  bool ok = false;
  Matrix lower = ivar.chol(ok);
  if (!ok) {
    report_error("Cholesky decomposition failed in rmvn_ivar_mt.");
  }
  // Hand the upper Cholesky factor of the precision to the sampler.
  Matrix upper = lower.transpose();
  return rmvn_precision_upper_cholesky_mt(rng, mu, upper);
}
// Constructor.
// Args:
//   pri_df:    Prior degrees of freedom (stored as a UnivParams).
//   PriVarEst: Prior estimate of the variance; scaled by pri_df to form
//     the model's SpdParams.
WM::WishartModel(double pri_df, const SpdMatrix &PriVarEst)
    : ParamPolicy(new UnivParams(pri_df), new SpdParams(PriVarEst*pri_df)),
      DataPolicy(new WS(PriVarEst.nrow())),
      PriorPolicy()
{
  // Verify positive definiteness up front via a Cholesky factorization so
  // that a bad prior estimate fails loudly at construction time.
  Chol chol(sumsq());
  if (!chol.is_pos_def()) {
    report_error("Sum of squares matrix must be positive definite in "
                 "WishartModel constructor");
  }
}
// Returns the nvars x nvars sub-matrix of S formed from the rows and
// columns whose entry of 'inc' is true.
//
// Args:
//   S:     The matrix to subset.
//   inc:   Inclusion flags, one per row/column of S.
//   nvars: The number of true entries in 'inc' (the output dimension).
//
// Bug fix: the scan loops previously ran i (and j) up to nvars, the
// *selected* count, so any included positions at index >= nvars were
// silently dropped whenever nvars < inc.size().  The scan must cover
// every position of 'inc'.
SpdMatrix select(const SpdMatrix &S, const std::vector<bool> &inc, uint nvars){
  SpdMatrix ans(nvars);
  uint I = 0;
  for (uint i = 0; i < inc.size(); ++i) {
    if (!inc[i]) continue;
    uint J = 0;
    for (uint j = 0; j < inc.size(); ++j) {
      if (!inc[j]) continue;
      ans.unchecked(I, J) = S.unchecked(i, j);
      ++J;
    }
    ++I;
  }
  return ans;
}
// Draws a multivariate normal deviate with mean mu and variance V using
// the spectral (eigen) decomposition of V, which tolerates a V that is
// only numerically positive semidefinite.
Vector rmvn_robust_mt(RNG &rng, const Vector &mu, const SpdMatrix &V) {
  const uint dim = V.nrow();
  Matrix eigenvectors(dim, dim);
  Vector scales = eigen(V, eigenvectors);
  for (uint i = 0; i < dim; ++i) {
    // We're guaranteed that each eigenvalue is real and non-negative;
    // fabs() guards against spurious small negatives from roundoff.
    scales[i] = sqrt(fabs(scales[i])) * rnorm_mt(rng, 0, 1);
  }
  Vector deviate = eigenvectors * scales;
  deviate += mu;
  return deviate;
}
// Sets the variance of the initial state distribution after validating
// its dimension.
void DRSM::set_initial_state_variance(const SpdMatrix &V) {
  check_size(V.nrow());
  initial_state_variance_ = V;
}
//====================================================================== double dmvt(const Vector &x, const Vector &mu, const SpdMatrix &Siginv, double nu, bool logscale){ double ldsi = Siginv.logdet(); return dmvt(x, mu, Siginv, nu, ldsi, logscale); }
// Product of a symmetric matrix and a diagonal matrix.  Delegates to
// SpdMatrix::mult, which fills and returns 'ans'.
Matrix operator*(const SpdMatrix &m1, const DiagonalMatrix &m2) {
  Matrix ans;
  return m1.mult(m2, ans);
}
// Cholesky factor of S.  'ok' is an output flag reporting whether the
// factorization succeeded (i.e. S is numerically positive definite).
Matrix chol(const SpdMatrix &S, bool & ok){return S.chol(ok);}
// Cholesky factor of S, with no success flag.
Matrix chol(const SpdMatrix &S){ return S.chol();}
// Convenience overload of dmvn that computes log|Siginv| on the fly and
// forwards to the full version.
double dmvn(const Vector &y, const Vector &mu, const SpdMatrix &Siginv,
            bool logscale) {
  return dmvn(y, mu, Siginv, Siginv.logdet(), logscale);
}
// Sets the strictly-above-diagonal elements of V to zero, column by
// column (entries 0..j-1 of column j).
inline void zero_upper(SpdMatrix &V){
  const uint dim = V.nrow();
  for (uint j = 0; j < dim; ++j) {
    dVector::iterator column_start = V.col_begin(j);
    std::fill(column_start, column_start + j, 0.0);
  }
}
// Error sum of squares at coefficient matrix B:
//   (Y - XB)'(Y - XB) = Y'Y - B'X'Y - (B'X'Y)' + B'(X'X)B
SpdMatrix MvRegSuf::SSE(const Matrix &B) const {
  SpdMatrix ans = yty();
  // Subtracts the two cross terms B'X'Y and its transpose.
  // NOTE(review): this presumes add_inner2(B, xty(), -1) adds
  // -1 * (B' xty() + xty()' B) -- confirm against add_inner2's definition.
  ans.add_inner2(B, xty(), -1);
  ans += sandwich(B.transpose(), xtx());  // + B' (X'X) B
  return ans;
}
// Draws a multivariate normal deviate with mean mu and variance V.
// Tries the fast Cholesky-based sampler first; if V is not numerically
// positive definite, falls back to the eigen-decomposition sampler.
Vector rmvn_mt(RNG &rng, const Vector &mu, const SpdMatrix &V) {
  bool chol_ok = true;
  Matrix lower = V.chol(chol_ok);
  if (!chol_ok) {
    return rmvn_robust_mt(rng, mu, V);
  }
  return rmvn_L_mt(rng, mu, lower);
}
double NeRegSuf::SSE()const{ SpdMatrix ivar = xtx().inv(); return yty() - ivar.Mdist(xty()); }
// Adds w * v * v^T to S via the BLAS symmetric rank-1 update (dsyr).
// With CblasUpper, only the upper triangle of S is written; callers that
// need the full dense matrix must reflect it afterwards.
// NOTE(review): whether callers rely on the lower triangle here is not
// visible in this chunk -- confirm.
void local_add_outer(SpdMatrix &S, const V &v, double w){
  assert(v.size()==S.nrow());
  cblas_dsyr(CblasColMajor, CblasUpper, v.size(), w,
             v.data(), v.stride(),
             S.data(), S.nrow());
}
//============================================================ SpdMatrix RegSuf::centered_xtx() const { SpdMatrix ans = xtx(); ans.add_outer(xbar(), -n()); return ans; }