Example #1
  double ZMMM::loglike()const{
    const double log2pi = 1.83787706641;
    double dim = mu_.size();
    double n = suf()->n();
    const Vec ybar = suf()->ybar();
    const Spd sumsq = suf()->center_sumsq();

    double qform = n*(siginv().Mdist(ybar));
    qform += traceAB(siginv(), sumsq);

    double nc = 0.5*n*(-dim*log2pi + ldsi());

    double ans = nc - .5*qform;
    return ans;
  }
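
The identity behind the two-part quadratic form, writing $d$ for the dimension, $\bar y$ for ybar(), and $S$ for the centered sum of squares center_sumsq(): for zero-mean data,

\[
  \sum_{i=1}^n y_i'\,\Sigma^{-1} y_i
    = n\,\bar y'\,\Sigma^{-1}\bar y + \operatorname{tr}\bigl(\Sigma^{-1} S\bigr),
  \qquad
  \log L(\Sigma^{-1})
    = \frac{n}{2}\bigl(\log|\Sigma^{-1}| - d\log 2\pi\bigr)
      - \frac{1}{2}\sum_{i=1}^n y_i'\,\Sigma^{-1} y_i,
\]

so qform accumulates the two pieces of the sum and nc is the normalizing constant.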
Example #2
  double MVT::loglike(const Vector &mu_siginv_triangle_nu) const {
    const DatasetType &dat(this->dat());
    const ConstVectorView mu(mu_siginv_triangle_nu, 0, dim());
    SpdMatrix siginv(dim());
    Vector::const_iterator it = mu_siginv_triangle_nu.cbegin() + dim();
    siginv.unvectorize(it, true);
    double ldsi = siginv.logdet();
    double nu = mu_siginv_triangle_nu.back();
    double lognu = log(nu);

    const double logpi = 1.1447298858494;
    uint n = dat.size();
    uint d = mu.size();
    double half_npd = .5 * (nu + d);

    double ans = lgamma(half_npd) - lgamma(nu / 2) - .5 * d * (lognu + logpi);
    ans += .5 * ldsi + half_npd * lognu;
    ans *= n;

    for (uint i = 0; i < n; ++i) {
      double delta = siginv.Mdist(mu, dat[i]->value());
      ans -= half_npd * log(nu + delta);
    }

    return ans;
  }
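
The density being summed, writing $\delta_i$ for the Mahalanobis distance returned by Mdist:

\[
  \log L(\mu, \Sigma^{-1}, \nu)
    = \sum_{i=1}^n \Bigl[
        \log\Gamma\bigl(\tfrac{\nu+d}{2}\bigr)
        - \log\Gamma\bigl(\tfrac{\nu}{2}\bigr)
        - \tfrac{d}{2}(\log\nu + \log\pi)
        + \tfrac{1}{2}\log|\Sigma^{-1}|
        - \tfrac{\nu+d}{2}\log\bigl(1 + \tfrac{\delta_i}{\nu}\bigr)
      \Bigr],
  \qquad
  \delta_i = (y_i - \mu)'\,\Sigma^{-1}(y_i - \mu).
\]

Because $\log(1 + \delta_i/\nu) = \log(\nu + \delta_i) - \log\nu$, the $\tfrac{\nu+d}{2}\log\nu$ piece is constant across observations and is folded into the part multiplied by n, leaving only $-\tfrac{\nu+d}{2}\log(\nu + \delta_i)$ inside the loop.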
Example #3
 double WeightedMvnModel::pdf(Ptr<WeightedVectorData> dp, bool logscale)const{
   double w = dp->weight();
   const Vector &y(dp->value());
   uint p = mu().size();
   double wldsi = p*log(w) + ldsi();
   return dmvn(y, mu(), w*siginv(), wldsi, logscale);
 }
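
The one-line body works because scaling a $p$-dimensional precision by the weight only shifts its log determinant, $\log|w\,\Sigma^{-1}| = p\log w + \log|\Sigma^{-1}|$, which is exactly wldsi; the density evaluated is

\[
  \log N\bigl(y \mid \mu,\; \Sigma/w\bigr)
    = -\frac{p}{2}\log 2\pi
      + \frac{1}{2}\bigl(p\log w + \log|\Sigma^{-1}|\bigr)
      - \frac{w}{2}\,(y - \mu)'\,\Sigma^{-1}(y - \mu).
\]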
Example #4
  //----------------------------------------------------------------------
  void BLCSSS::rwm_draw_chunk(int chunk){
    clock_t start = clock();
    const Selector &inc(m_->coef().inc());
    int nvars = inc.nvars();
    Vec full_nonzero_beta = m_->beta();   // only nonzero components
    // Compute information matrix for proposal distribution.  For
    // efficiency, also compute the log-posterior of the current beta.
    Vec mu(inc.select(pri_->mu()));
    Spd siginv(inc.select(pri_->siginv()));
    double original_logpost = dmvn(full_nonzero_beta, mu, siginv, 0, true);

    const std::vector<Ptr<BinomialRegressionData> > &data(m_->dat());
    int nobs = data.size();

    int full_chunk_size = compute_chunk_size();
    int chunk_start = chunk * full_chunk_size;
    int elements_remaining = nvars - chunk_start;
    int this_chunk_size = std::min(elements_remaining, full_chunk_size);
    Selector chunk_selector(nvars, false);
    for(int i = chunk_start; i < chunk_start + this_chunk_size; ++i) {
      chunk_selector.add(i);
    }

    Spd proposal_ivar = chunk_selector.select(siginv);

    for(int i = 0; i < nobs; ++i){
      Vec x = inc.select(data[i]->x());
      double eta = x.dot(full_nonzero_beta);
      double prob = plogis(eta);
      double weight = prob * (1-prob);
      VectorView x_chunk(x, chunk_start, this_chunk_size);
      // Only upper triangle is accessed.  Need to reflect at end of loop.
      proposal_ivar.add_outer(x_chunk, weight, false);
      int yi = data[i]->y();
      int ni = data[i]->n();
      original_logpost += dbinom(yi, ni, prob, true);
    }
    proposal_ivar.reflect();
    VectorView beta_chunk(full_nonzero_beta, chunk_start, this_chunk_size);
    if(tdf_ > 0){
      beta_chunk = rmvt_ivar_mt(
          rng(), beta_chunk, proposal_ivar / rwm_variance_scale_factor_, tdf_);
    }else{
      beta_chunk = rmvn_ivar_mt(
          rng(), beta_chunk, proposal_ivar / rwm_variance_scale_factor_);
    }

    double logpost = dmvn(full_nonzero_beta, mu, siginv, 0, true);
    Vec full_beta(inc.expand(full_nonzero_beta));
    logpost += m_->log_likelihood(full_beta, 0, 0, false);
    double log_alpha = logpost - original_logpost;
    double logu = log(runif_mt(rng()));
    ++rwm_chunk_attempts_;
    if(logu < log_alpha){
      m_->set_beta(full_nonzero_beta);
      ++rwm_chunk_successes_;
    }
    clock_t end = clock();
    rwm_chunk_times_ += double(end - start) / CLOCKS_PER_SEC;
  }
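
In symbols, with $\Sigma_0^{-1}$ the prior precision pri_->siginv() restricted to the included coefficients and the subscript $c$ selecting the current chunk, the proposal precision accumulated above is

\[
  \Omega_c = \bigl(\Sigma_0^{-1}\bigr)_{cc}
    + \sum_{i=1}^{N} p_i\,(1 - p_i)\;x_{i,c}\,x_{i,c}',
  \qquad
  p_i = \frac{1}{1 + \exp(-x_i'\beta)}.
\]

The chunk is then redrawn around its current value from a normal (or multivariate t, when tdf_ > 0) proposal with inverse variance $\Omega_c$ divided by rwm_variance_scale_factor_, and the move is accepted when $\log u < \log\pi(\beta^\star) - \log\pi(\beta)$, where $\log\pi$ is the Gaussian log prior plus the binomial logit log likelihood.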
Example #5
 double MGSS::loglike(const Vector &mu_ominv)const{
   const ConstVectorView mu(mu_ominv, 0, dim());
   SpdMatrix siginv(dim());
   Vector::const_iterator b(mu_ominv.cbegin() + dim());
   siginv.unvectorize(b, true);
   siginv /= sigsq();
   return MvnBase::log_likelihood(Vector(mu), siginv, *suf());
 }
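
The parameter vector packs $\mu$ followed by the upper triangle of $\Omega^{-1}$; dividing by sigsq() yields the precision handed to MvnBase::log_likelihood, consistent with a covariance of the form

\[
  \Sigma = \sigma^2\,\Omega
  \quad\Longrightarrow\quad
  \Sigma^{-1} = \Omega^{-1} / \sigma^2 .
\]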
Example #6
 double HPRIPS::logpri()const{
   const SpdMatrix & siginv(data_parent_model()->siginv());
   double ans = 0;
   for (int i = 0; i < nrow(siginv); ++i) {
     ans += siginv_priors_[i]->logp(siginv(i, i));
   }
   return ans;
 }
Example #7
 double MvReg::loglike(const Vector &beta_siginv) const {
   Matrix Beta(xdim(), ydim());
   Vector::const_iterator it = beta_siginv.cbegin();
   std::copy(it, it + Beta.size(), Beta.begin());
   it += Beta.size();
   SpdMatrix siginv(ydim());
   siginv.unvectorize(it, true);
   return log_likelihood_ivar(Beta, siginv);
 }
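
The unpacking above implies a layout of xdim() * ydim() coefficients first (filled into Beta in the Matrix's storage order), followed by the minimal triangular vectorization of Siginv. A hypothetical packing helper, sketched under the assumption that SpdMatrix::vectorize(true) produces the triangle in the same order that unvectorize(it, true) consumes:

 // Hypothetical helper, not part of the library: build the argument that
 // MvReg::loglike expects from a coefficient matrix and a precision matrix.
 Vector pack_mvreg_parameters(const Matrix &Beta, const SpdMatrix &siginv) {
   Vector triangle = siginv.vectorize(true);   // assumed inverse of unvectorize
   Vector theta(Beta.size() + triangle.size());
   std::copy(Beta.begin(), Beta.end(), theta.begin());
   std::copy(triangle.begin(), triangle.end(), theta.begin() + Beta.size());
   return theta;
 }

With such a helper, model.loglike(pack_mvreg_parameters(Beta, Siginv)) would evaluate the likelihood at those parameter values.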
Example #8
  //----------------------------------------------------------------------
  void BLCSSS::rwm_draw_chunk(int chunk){
    const Selector &inc(m_->coef().inc());
    int nvars = inc.nvars();
    Vector full_nonzero_beta = m_->included_coefficients();
    // Compute information matrix for proposal distribution.  For
    // efficiency, also compute the log-posterior of the current beta.
    Vector mu(inc.select(pri_->mu()));
    SpdMatrix siginv(inc.select(pri_->siginv()));
    double original_logpost = dmvn(full_nonzero_beta, mu, siginv, 0, true);

    const std::vector<Ptr<BinomialRegressionData> > &data(m_->dat());
    int nobs = data.size();

    int full_chunk_size = compute_chunk_size(max_rwm_chunk_size_);
    int chunk_start = chunk * full_chunk_size;
    int elements_remaining = nvars - chunk_start;
    int this_chunk_size = std::min(elements_remaining, full_chunk_size);
    Selector chunk_selector(nvars, false);
    for(int i = chunk_start; i < chunk_start + this_chunk_size; ++i) {
      chunk_selector.add(i);
    }

    SpdMatrix proposal_ivar = chunk_selector.select(siginv);

    for(int i = 0; i < nobs; ++i){
      Vector x = inc.select(data[i]->x());
      double eta = x.dot(full_nonzero_beta);
      double prob = plogis(eta);
      double weight = prob * (1-prob);
      VectorView x_chunk(x, chunk_start, this_chunk_size);
      // Only upper triangle is accessed.  Need to reflect at end of loop.
      proposal_ivar.add_outer(x_chunk, weight, false);
      original_logpost += dbinom(data[i]->y(), data[i]->n(), prob, true);
    }
    proposal_ivar.reflect();
    VectorView beta_chunk(full_nonzero_beta, chunk_start, this_chunk_size);
    if(tdf_ > 0){
      beta_chunk = rmvt_ivar_mt(
          rng(), beta_chunk, proposal_ivar / rwm_variance_scale_factor_, tdf_);
    }else{
      beta_chunk = rmvn_ivar_mt(
          rng(), beta_chunk, proposal_ivar / rwm_variance_scale_factor_);
    }

    double logpost = dmvn(full_nonzero_beta, mu, siginv, 0, true);
    Vector full_beta(inc.expand(full_nonzero_beta));
    logpost += m_->log_likelihood(full_beta, 0, 0, false);
    double log_alpha = logpost - original_logpost;
    double logu = log(runif_mt(rng()));
    if (logu < log_alpha) {
      m_->set_included_coefficients(full_nonzero_beta);
      move_accounting_.record_acceptance("rwm_chunk");
    } else {
      move_accounting_.record_rejection("rwm_chunk");
    }
  }
Example #9
  void MVT::Impute(bool sample, RNG &rng) {
    std::vector<Ptr<WVD> > &V(mvn->dat());

    for (uint i = 0; i < V.size(); ++i) {
      Ptr<WVD> d = V[i];
      const Vector &y(d->value());
      double delta = siginv().Mdist(y, mu());
      double a = (nu() + y.length()) / 2.0;
      double b = (nu() + delta) / 2.0;
      double w = sample ? rgamma_mt(rng, a, b) : a / b;
      d->set_weight(w);
    }
    mvn->refresh_suf();
    wgt->refresh_suf();
  }
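
This is the scale-mixture augmentation of the multivariate t: writing $y_i \mid w_i \sim N(\mu, \Sigma/w_i)$ with $w_i \sim \mathrm{Gamma}(\nu/2, \nu/2)$ a priori, the full conditional of each weight is

\[
  w_i \mid y_i \;\sim\; \mathrm{Gamma}\!\left(\frac{\nu + d}{2},\; \frac{\nu + \delta_i}{2}\right),
  \qquad
  \delta_i = (y_i - \mu)'\,\Sigma^{-1}(y_i - \mu),
\]

so a draw is taken when sample is true, and otherwise the conditional mean $a/b = (\nu + d)/(\nu + \delta_i)$ is plugged in as an EM-style expectation.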
Example #10
  double WeightedMvnModel::loglike(const Vector &mu_siginv_triangle)const{
    const double log2pi = 1.83787706641;
    const ConstVectorView mu(mu_siginv_triangle, 0, dim());
    SpdMatrix siginv(dim());
    Vector::const_iterator it = mu_siginv_triangle.begin() + dim();
    siginv.unvectorize(it, true);
    double ldsi = siginv.logdet();

    double sumlogw = suf()->sumlogw();
    const SpdMatrix sumsq = suf()->center_sumsq();
    double n = suf()->n();

    double ans = .5 * n * (ldsi - dim() * log2pi) + .5 * dim() * sumlogw;
    ans -= .5 * traceAB(siginv, suf()->center_sumsq(mu));
    return ans;
  }
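
For reference, taking suf()->n() to be the number of observations and center_sumsq(mu) to be the weighted centered sum of squares $\sum_i w_i (y_i - \mu)(y_i - \mu)'$, the quantity returned for $y_i \sim N(\mu, \Sigma/w_i)$ is

\[
  \log L(\mu, \Sigma^{-1})
    = -\frac{n d}{2}\log 2\pi
      + \frac{n}{2}\log|\Sigma^{-1}|
      + \frac{d}{2}\sum_{i}\log w_i
      - \frac{1}{2}\operatorname{tr}\!\Bigl(\Sigma^{-1}\sum_i w_i (y_i - \mu)(y_i - \mu)'\Bigr).
\]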
Example #11
 double MGSS::pdf(Ptr<Data> dp, bool logscale)const{
   const Vec &y(DAT(dp)->value());
   return dmvn(y, mu(), siginv(), ldsi(), logscale);
 }
Example #12
 double MVT::pdf(const Vector &x, bool logscale) const {
   return dmvt(x, mu(), siginv(), nu(), ldsi(), logscale);
 }
Example #13
 double ZMMM::pdf(Ptr<Data> dp, bool logscale)const{
   Ptr<VectorData> dpp = DAT(dp);
   return dmvn_zero_mean(dpp->value(), siginv(), ldsi(), logscale);
 }