// Gibbs update (step S1) of the latent association indicators e_it stored
// in E_mat. Only entries whose observed statistic y_it falls in the
// "uncertain" interval (0, threshold_on] are sampled here; outside that
// interval e_it was fixed in CParam::Initialize (e_it = 0 if y_it <= 0,
// e_it = 1 if y_it >= Data.threshold_on). Also accumulates the data
// log-likelihood under the updated indicators.
void CParam::S1_e_it(CData &Data) {
  for (int i_SNP = 0; i_SNP < n_SNP; i_SNP++) {
    for (int i = 0; i < n_pheno; i++) {
      // FIX: logical && instead of bitwise & between boolean conditions.
      if ((Data.Y(i, i_SNP) > 0) && (Data.Y(i, i_SNP) <= Data.threshold_on)) {
        // Unnormalized log full-conditional mass for e_it = 0 and e_it = 1.
        double unnorm_logprob0 = 0.0;
        double unnorm_logprob1 = Beta(i, i);
        for (int j = 0; j < n_pheno; j++) {
          // Only graph neighbors contribute; G_mat(i,j)==0 (no edge) and
          // G_mat(i,j)==9 (diagonal) are not counted.
          if (G_mat(i, j) == 1)
            unnorm_logprob1 = unnorm_logprob1 + Beta(i, j) * E_mat(j, i_SNP);
        }
        // Add the log density of y_it under each mixture component:
        // null component = standard normal, signal component = lognormal.
        unnorm_logprob0 = unnorm_logprob0 + R::dnorm(Data.Y(i, i_SNP), 0, 1, 1);  // log = T
        unnorm_logprob1 = unnorm_logprob1 + R::dlnorm(Data.Y(i, i_SNP), mu_vec(i), sqrt(sig2_vec(i)), 1);  // log = T
        // p1 = f1/(f1+f0) = 1/(1+f0/f1) = 1/(1+exp(log f0 - log f1))
        double prob_e_it = 1.0 / (1.0 + exp(unnorm_logprob0 - unnorm_logprob1));
        RandVec = Rcpp::runif(1, 0, 1);
        if (prob_e_it >= RandVec(0)) {
          E_mat(i, i_SNP) = 1;
        } else {
          E_mat(i, i_SNP) = 0;
        }
      }
      // Accumulate the log-likelihood contribution of (i, i_SNP) under the
      // current value of e_it (used to monitor the chain).
      if (E_mat(i, i_SNP) == 1) {
        loglikelihood = loglikelihood + R::dlnorm(Data.Y(i, i_SNP), mu_vec(i), sqrt(sig2_vec(i)), 1);
      } else {
        loglikelihood = loglikelihood + R::dnorm(Data.Y(i, i_SNP), 0, 1, 1);
      }
    }
  }
  is_accept_vec(0) = 1;  // Gibbs step: proposal always accepted
}
// initialize prior density of filter void TrackerKalman::initialize(const StatePosVel& mu, const StatePosVel& sigma, const double time) { ColumnVector mu_vec(6); SymmetricMatrix sigma_vec(6); sigma_vec = 0; for (unsigned int i=0; i<3; i++){ mu_vec(i+1) = mu.pos_[i]; mu_vec(i+4) = mu.vel_[i]; sigma_vec(i+1,i+1) = pow(sigma.pos_[i],2); sigma_vec(i+4,i+4) = pow(sigma.vel_[i],2); } prior_ = Gaussian(mu_vec, sigma_vec); filter_ = new ExtendedKalmanFilter(&prior_); // tracker initialized tracker_initialized_ = true; quality_ = 1; filter_time_ = time; init_time_ = time; }
// Gibbs update (step S3) of the phenotype-specific variances sig2_vec.
// Given the indicators e_it and mean mu_i, each sigma_i^2 has a conjugate
// inverse-gamma full conditional; draw from it for every phenotype.
void CParam::S3_sig2_i(CData &Data) {
  for (int i = 0; i < n_pheno; i++) {
    arma::vec indicator = E_mat.row(i).t();       // e_it for phenotype i
    arma::vec resid = Data.logY.row(i).t();       // log y_it
    resid -= mu_vec(i);                           // center: log y_it - mu_i
    // % is the element-wise (Schur) product: zero out terms with e_it = 0.
    arma::vec active_resid = resid % indicator;
    double n_active = sum(indicator);             // number of e_it = 1 entries
    // Sum of squared residuals over the active entries.
    double ss_active = arma::dot(active_resid, active_resid);
    // Inverse-gamma posterior parameters.
    double a_star = Data.a_sigma + 0.5 * n_active;
    double b_star = Data.b_sigma + 0.5 * ss_active;
    sig2_vec(i) = rinvgamma(a_star, b_star);
  }
  is_accept_vec(2) = 1;  // Gibbs step: proposal always accepted
}
// Gibbs update (step S2) of the phenotype-specific means mu_vec.
// With a normal prior N(theta_mu, tau2_mu) and normal likelihood on the
// active log-observations, each mu_i has a conjugate normal full
// conditional; draw from it for every phenotype.
void CParam::S2_mu_i(CData &Data) {
  for (int i = 0; i < n_pheno; i++) {
    const double sig2_i = sig2_vec(i);
    arma::vec indicator = E_mat.row(i).t();   // e_it for phenotype i
    arma::vec logy_i = Data.logY.row(i).t();
    // % is the element-wise (Schur) product: sum of log y_it over e_it = 1.
    double sum_active_logy = sum(logy_i % indicator);
    double n_active = sum(indicator);
    // Shared denominator of the conjugate-normal posterior moments.
    double denom = sig2_i + Data.tau2_mu * n_active;
    double mean_star = (sig2_i * Data.theta_mu + Data.tau2_mu * sum_active_logy) / denom;
    double var_star = (sig2_i * Data.tau2_mu) / denom;
    RandVec = Rcpp::rnorm(1, mean_star, sqrt(var_star));
    mu_vec(i) = RandVec(0);
  }
  is_accept_vec(1) = 1;  // Gibbs step: proposal always accepted
}
/**
 * Returns the log of the multivariate normal density for the given
 * observation(s) y, location(s) mu, and Cholesky factor L of the
 * covariance matrix (i.e. Sigma = L * L').
 *
 * Validates that all observation vectors share one size, that all
 * location vectors share one size, and that these sizes match the
 * dimensions of L; checks mu is finite and y contains no NaN.
 *
 * @param y  (Sequence of) random variable vector(s).
 * @param mu (Sequence of) location vector(s).
 * @param L  Lower-triangular Cholesky factor of the covariance.
 * @return   Accumulated log density over the sequence.
 */
typename return_type<T_y, T_loc, T_covar>::type
multi_normal_cholesky_log(const T_y& y, const T_loc& mu, const T_covar& L) {
  static const char* function("multi_normal_cholesky_log");
  typedef typename scalar_type<T_covar>::type T_covar_elem;
  typedef typename return_type<T_y, T_loc, T_covar>::type lp_type;
  lp_type lp(0.0);
  VectorViewMvt<const T_y> y_vec(y);
  VectorViewMvt<const T_loc> mu_vec(mu);
  size_t size_vec = max_size_mvt(y, mu);

  int size_y = y_vec[0].size();
  int size_mu = mu_vec[0].size();
  if (size_vec > 1) {
    // All vectors of the random variable must have the same dimension.
    int size_y_old = size_y;
    int size_y_new;
    for (size_t i = 1, size_ = length_mvt(y); i < size_; i++) {
      // FIX: assign to the outer size_y_new instead of declaring a new
      // local that shadowed it and left the outer variable uninitialized.
      size_y_new = y_vec[i].size();
      check_size_match(function,
                       "Size of one of the vectors of "
                       "the random variable", size_y_new,
                       "Size of another vector of the "
                       "random variable", size_y_old);
      size_y_old = size_y_new;
    }
    // All vectors of the location parameter must have the same dimension.
    int size_mu_old = size_mu;
    int size_mu_new;
    for (size_t i = 1, size_ = length_mvt(mu); i < size_; i++) {
      // FIX: same shadowing bug as above, removed.
      size_mu_new = mu_vec[i].size();
      check_size_match(function,
                       "Size of one of the vectors of "
                       "the location variable", size_mu_new,
                       "Size of another vector of the "
                       "location variable", size_mu_old);
      size_mu_old = size_mu_new;
    }
    // Suppress unused-variable warnings (loops may run zero iterations).
    (void) size_y_old;
    (void) size_y_new;
    (void) size_mu_old;
    (void) size_mu_new;
  }

  check_size_match(function,
                   "Size of random variable", size_y,
                   "size of location parameter", size_mu);
  check_size_match(function,
                   "Size of random variable", size_y,
                   "rows of covariance parameter", L.rows());
  check_size_match(function,
                   "Size of random variable", size_y,
                   "columns of covariance parameter", L.cols());

  for (size_t i = 0; i < size_vec; i++) {
    check_finite(function, "Location parameter", mu_vec[i]);
    check_not_nan(function, "Random variable", y_vec[i]);
  }

  if (size_y == 0)
    return lp;

  // Constant term: -(n/2) log(2*pi) per vector in the sequence.
  if (include_summand<propto>::value)
    lp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec;

  // log|Sigma|^{-1/2} = -sum(log diag(L)), once per vector.
  if (include_summand<propto, T_covar_elem>::value)
    lp -= L.diagonal().array().log().sum() * size_vec;

  // Quadratic form: sum over the sequence of ||L^{-1}(y - mu)||^2.
  if (include_summand<propto, T_y, T_loc, T_covar_elem>::value) {
    lp_type sum_lp_vec(0.0);
    for (size_t i = 0; i < size_vec; i++) {
      Eigen::Matrix<typename return_type<T_y, T_loc>::type,
                    Eigen::Dynamic, 1> y_minus_mu(size_y);
      for (int j = 0; j < size_y; j++)
        y_minus_mu(j) = y_vec[i](j) - mu_vec[i](j);
      Eigen::Matrix<typename return_type<T_y, T_loc, T_covar>::type,
                    Eigen::Dynamic, 1>
        half(mdivide_left_tri_low(L, y_minus_mu));
      // FIXME: this code does not compile. revert after fixing subtract()
      // Eigen::Matrix<typename
      //   boost::math::tools::promote_args<T_covar,
      //     typename value_type<T_loc>::type,
      //     typename value_type<T_y>::type>::type>::type,
      //   Eigen::Dynamic, 1>
      //   half(mdivide_left_tri_low(L, subtract(y, mu)));
      sum_lp_vec += dot_self(half);
    }
    lp -= 0.5 * sum_lp_vec;
  }
  return lp;
}