T Selector::sub_select(const T &x, const Selector &rhs) const {
  // Compose two selections:  'rhs' indexes positions within THIS
  // selector's included subset.  Translate each of those positions back
  // into the full variable space, build a selector over the full space,
  // and apply it to x.
  assert(rhs.nvars() <= this->nvars());
  assert(this->covers(rhs));
  Selector expanded(nvars(), false);
  const uint rhs_size = rhs.nvars();
  for (uint pos = 0; pos != rhs_size; ++pos) {
    // indx(pos) maps 'pos' back to a raw index; INDX re-expresses it in
    // the space this selector operates on.
    expanded.add(INDX(rhs.indx(pos)));
  }
  return expanded.select(x);
}
// Returns the log of the (unnormalized) marginal posterior probability
// of the inclusion-indicator vector g:  the log prior probability of g
// plus the log of the Gaussian integral over the coefficients included
// by g.  Used to compare candidate models during spike-and-slab MCMC.
//
// Args:
//   g:  Selector indicating which coefficients are included.
//
// Returns:
//   The log model probability, or BOOM::negative_infinity() if g has no
//   prior support or the conditional information matrix is not positive
//   definite.
double BLSSS::log_model_prob(const Selector &g)const{
  // borrowed from MLVS.cpp
  double num = vpri_->logp(g);
  if(num==BOOM::negative_infinity() || g.nvars() == 0) {
    // If num == -infinity then it is in a zero support point in the
    // prior. If g.nvars()==0 then all coefficients are zero
    // because of the point mass. The only entries remaining in the
    // likelihood are sums of squares of y[i] that are independent
    // of g. They need to be omitted here because they are omitted
    // in the non-empty case below.
    return num;
  }
  // Prior precision of the included coefficients.
  SpdMatrix ivar = g.select(pri_->siginv());
  num += .5*ivar.logdet();
  // logdet can signal a degenerate (singular) prior precision.
  if(num == BOOM::negative_infinity()) return num;
  Vector mu = g.select(pri_->mu());
  Vector ivar_mu = ivar * mu;
  // Prior quadratic form: -.5 * mu' * siginv * mu.
  num -= .5*mu.dot(ivar_mu);
  bool ok=true;
  // ivar becomes the posterior precision: prior precision + X'X.
  ivar += g.select(suf().xtx());
  Matrix L = ivar.chol(ok);
  // A failed Cholesky means the posterior precision is not positive
  // definite, so the model gets zero probability.
  if(!ok) return BOOM::negative_infinity();
  double denom = sum(log(L.diag()));  // = .5 log |ivar|
  // S starts as the information-weighted posterior mean numerator:
  // X'y + siginv*mu.
  Vector S = g.select(suf().xty()) + ivar_mu;
  // Solve L * z = S in place; afterwards S.normsq() equals the posterior
  // quadratic form without explicitly forming the posterior mean.
  Lsolve_inplace(L,S);
  denom-= .5*S.normsq();  // S.normsq = beta_tilde ^T V_tilde beta_tilde
  return num-denom;
}
// Draws the included regression coefficients from their conditional
// posterior given the inclusion indicators, the prior (pri_), and the
// data sufficient statistics (suf()).
//
// NOTE(review): this source also contains a second definition of
// BLSSS::draw_beta (using model_/slab_ members).  If both definitions
// live in the same translation unit this is a redefinition error —
// confirm which revision is intended to survive.
void BLSSS::draw_beta() {
  Selector g = m_->coef().inc();
  if(g.nvars() == 0) {
    // Nothing is included; zero out all coefficients and return.
    m_->drop_all();
    return;
  }
  // Posterior precision = prior precision + X'X, restricted to the
  // included subset.
  SpdMatrix ivar = g.select(pri_->siginv());
  Vector ivar_mu = ivar * g.select(pri_->mu());
  ivar += g.select(suf().xtx());
  // ivar_mu accumulates the information-weighted mean: siginv*mu + X'y.
  ivar_mu += g.select(suf().xty());
  // Posterior mean solves ivar * b = ivar_mu.
  Vector b = ivar.solve(ivar_mu);
  // NOTE(review): solve() and rmvn_ivar_mt presumably each factor ivar,
  // duplicating the decomposition; the Cholesky-based revision of this
  // function factors once.  Confirm before relying on this version.
  b = rmvn_ivar_mt(rng(), b, ivar);
  // If model selection is turned off and some elements of beta
  // happen to be zero (because, e.g., of a failed MH step) we don't
  // want the dimension of beta to change.
  m_->set_included_coefficients(b, g);
}
void BLSSS::draw_beta() { Selector g = model_->coef().inc(); if (g.nvars() == 0) { model_->drop_all(); return; } SpdMatrix precision = g.select(slab_->siginv()); Vector scaled_mean = precision * g.select(slab_->mu()); precision += g.select(suf().xtx()); Cholesky precision_cholesky_factor(precision); scaled_mean += g.select(suf().xty()); Vector posterior_mean = precision_cholesky_factor.solve(scaled_mean); Vector beta = rmvn_precision_upper_cholesky_mt( rng(), posterior_mean, precision_cholesky_factor.getLT()); // If model selection is turned off and some elements of beta // happen to be zero (because, e.g., of a failed MH step) we don't // want the dimension of beta to change. model_->set_included_coefficients(beta, g); }