template<typename T1>
inline
arma_warn_unused
typename T1::elem_type
sum(const Op<T1, op_sum>& in)
  {
  arma_extra_debug_sigprint();
  arma_extra_debug_print("sum(): two consecutive sum() calls detected");

  return accu(in.m);
  }
template<typename Distribution>
double HMM<Distribution>::LogLikelihood(const arma::mat& dataSeq) const
{
  arma::mat forward;
  arma::vec scales;

  Forward(dataSeq, scales, forward);

  // The log-likelihood is the sum of the logs of the scaling factors over all
  // time steps.
  return accu(log(scales));
}
::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > computeSegAccu() const
{
	::libmaus2::autoarray::AutoArray<uint64_t> preaccu(index.size()+1);
	for ( uint64_t i = 0; i < index.size(); ++i )
		preaccu[i] = index[i].size();
	preaccu.prefixSums();

	::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > accu(index.size());
	for ( uint64_t i = 1; i < preaccu.size(); ++i )
		accu[i-1] = std::pair<uint64_t,uint64_t>(preaccu[i-1],preaccu[i]);

	return accu;
}
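// A minimal standalone sketch of the prefix-sum trick used above: per-segment
// sizes become half-open [low, high) offset intervals. It uses std::vector and
// std::partial_sum instead of libmaus2's AutoArray, so it illustrates the
// idea only and is not libmaus2 API.
#include <vector>
#include <numeric>
#include <utility>
#include <cstdint>

std::vector< std::pair<uint64_t,uint64_t> > sizesToIntervals(std::vector<uint64_t> const & sizes)
{
	// preaccu[i] holds the sum of sizes[0..i-1], so consecutive entries
	// bound each segment: e.g. sizes {3,1,4} -> intervals [0,3) [3,4) [4,8).
	std::vector<uint64_t> preaccu(sizes.size()+1, 0);
	std::partial_sum(sizes.begin(), sizes.end(), preaccu.begin()+1);

	std::vector< std::pair<uint64_t,uint64_t> > intervals(sizes.size());
	for ( uint64_t i = 1; i < preaccu.size(); ++i )
		intervals[i-1] = std::pair<uint64_t,uint64_t>(preaccu[i-1], preaccu[i]);
	return intervals;
}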
// Posterior density function to sample theta.
double f_theta(colvec logTheta,
               colvec mTheta,
               mat Otheta,
               double Tau,
               mat Y,
               mat Fit,
               rowvec Sigma,
               double logEHRTime)
{
  double prior, like, post;

  // Prior: quadratic form in the centered log-theta, with precision Tau * Otheta.
  colvec cTheta = logTheta - mTheta;
  prior = -0.5 * Tau * as_scalar(cTheta.t() * Otheta * cTheta);

  // Likelihood: squared log-residuals, column-weighted by 1 / Sigma.
  mat D = diagmat(1 / Sigma);
  like = -0.5 * accu(pow(log(Y) - log(Fit), 2) * D);
  // Need to figure out what to do with Sigma.

  // Conditional posterior
  post = prior + like + Rf_dnorm4(logEHRTime, 0, 1, 1);
  return post;
}
void SPF::update_shape(int user, int item, int rating) {
    sp_fmat phi_SF = logtau.col(user) % data->ratings.col(item);
    double phi_sum = accu(phi_SF);

    fmat phi_MF;
    float phi_B = 0;
    // We don't need to do a similar check for "factor only" because sparse
    // matrices play nice when empty.
    if (!settings->social_only) {
        phi_MF = exp(logtheta.col(user) + logbeta.col(item));
        phi_sum += accu(phi_MF);
    }

    if (settings->item_bias) {
        phi_B = delta(item);
        phi_sum += phi_B;
    }

    if (phi_sum == 0)
        return;

    // Logical && (the original bitwise & worked only because both operands
    // are bools).
    if (!settings->factor_only && !settings->fix_influence) {
        phi_SF /= phi_sum * rating;
        int neighbor;
        for (int n = 0; n < data->neighbor_count(user); n++) {
            neighbor = data->get_neighbor(user, n);
            a_tau(neighbor, user) += phi_SF(neighbor, 0);
        }
    }

    if (!settings->social_only) {
        phi_MF /= phi_sum * rating;
        a_theta.col(user) += phi_MF;
        a_beta_user.col(item) += phi_MF * scale;
    }

    if (settings->item_bias) {
        a_delta(item) += (phi_B / (phi_sum * rating)) * scale;
    }
}
/**
 * Define the discrete distribution as having the given probabilities for each
 * observation.
 *
 * @param probabilities Probabilities of each possible observation.
 */
DiscreteDistribution(const arma::vec& probabilities)
{
  // We must be sure that our distribution is normalized.
  double sum = accu(probabilities);
  if (sum > 0)
    this->probabilities = probabilities / sum;
  else
  {
    this->probabilities.set_size(probabilities.n_elem);
    // Use 1.0 to force floating-point division; the integer expression
    // 1 / n_elem would truncate to 0.
    this->probabilities.fill(1.0 / probabilities.n_elem);
  }
}
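// A hedged usage sketch for the constructor above: the input is normalized on
// construction, and an all-zero vector falls back to the uniform distribution.
// (The function name below is hypothetical.)
void discreteDistributionExample()
{
  arma::vec p = {1.0, 3.0, 0.0, 4.0};
  DiscreteDistribution d(p); // stores {0.125, 0.375, 0.0, 0.5}

  // Degenerate input: every probability becomes 1.0 / n_elem = 0.25.
  DiscreteDistribution u(arma::zeros<arma::vec>(4));
}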
double getPredictionAccuracy(mat& predictions, mat& labels)
{
  int m = predictions.n_rows;
  double accuracy = 0;
  mat anticipations = round(predictions);

  /* For each prediction */
  for (int i = 0; i < m; i++)
  {
    /* If there is no difference between the rounded prediction and the label.
       Sum absolute differences so that opposite errors cannot cancel out. */
    if (accu(abs(labels.row(i) - anticipations.row(i))) == 0)
      accuracy += 1;
  }
  return (accuracy / m) * 100;
}
//' Samples from a Dirichlet distribution given a hyperparameter
//'
//' @param num_elements the dimension of the Dirichlet distribution
//' @param alpha the hyperparameter vector (a column vector)
//'
//' @return returns a Dirichlet sample (a column vector)
//'
//' @note
//' Author: Clint P. George
//'
//' Created on: 2014
//'
//' @family utils
//'
//' @export
// [[Rcpp::export]]
arma::vec sample_dirichlet(unsigned int num_elements, arma::vec alpha)
{
  arma::vec dirichlet_sample = arma::zeros<arma::vec>(num_elements);

  // Draw independent Gamma(alpha_i, 1) variates, then normalize them.
  for (unsigned int i = 0; i < num_elements; i++)
    dirichlet_sample(i) = rgamma(1, alpha(i), 1.0)(0); // R::rgamma(1, alpha(i));

  dirichlet_sample /= accu(dirichlet_sample);
  return dirichlet_sample;
}
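// Sanity-check sketch for sample_dirichlet(): a Dirichlet draw lies on the
// probability simplex, so its entries are non-negative and sum to one. Note
// that rgamma() above is Rcpp sugar, so this must run inside an R session.
// (The function name below is hypothetical.)
void sampleDirichletExample()
{
  arma::vec alpha = {0.5, 1.0, 2.0};
  arma::vec s = sample_dirichlet(3, alpha);
  // accu(s) == 1.0 up to floating-point rounding, and all entries are >= 0.
}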
template<typename Distribution>
void HMM<Distribution>::Forward(const arma::mat& dataSeq,
                                arma::vec& scales,
                                arma::mat& forwardProb) const
{
  // Our goal is to calculate the forward probabilities:
  //   P(X_k | o_{1:k}) for all possible states X_k, for each time point k.
  forwardProb.zeros(transition.n_rows, dataSeq.n_cols);
  scales.zeros(dataSeq.n_cols);

  // The first entry in the forward algorithm uses the initial state
  // probabilities.  Note that MATLAB assumes that the starting state (at
  // t = -1) is state 0; this is not our assumption here.  To force that
  // behavior, you could append a single starting state to every single data
  // sequence and that should produce results in line with MATLAB.
  for (size_t state = 0; state < transition.n_rows; state++)
    forwardProb(state, 0) = initial(state) *
        emission[state].Probability(dataSeq.unsafe_col(0));

  // Then normalize the column.
  scales[0] = accu(forwardProb.col(0));
  forwardProb.col(0) /= scales[0];

  // Now compute the probabilities for each successive observation.
  for (size_t t = 1; t < dataSeq.n_cols; t++)
  {
    for (size_t j = 0; j < transition.n_rows; j++)
    {
      // The forward probability of state j at time t is the sum over all
      // states of the probability of the previous state transitioning to the
      // current state and emitting the given observation.
      forwardProb(j, t) = accu(forwardProb.col(t - 1) %
          trans(transition.row(j))) *
          emission[j].Probability(dataSeq.unsafe_col(t));
    }

    // Normalize probability.
    scales[t] = accu(forwardProb.col(t));
    forwardProb.col(t) /= scales[t];
  }
}
template<typename eT>
inline
arma_warn_unused
eT
mean(const subview_row<eT>& A)
  {
  arma_extra_debug_sigprint();

  arma_debug_check( (A.n_elem == 0), "mean(): given object has no elements" );

  const eT mu = accu(A) / eT(A.n_cols);

  return is_finite(mu) ? mu : op_mean::direct_mean_robust(A);
  }
template<typename T1>
inline
typename
enable_if2
  <
  (is_arma_sparse_type<T1>::value == true) && (resolves_to_sparse_vector<T1>::value == true),
  typename T1::elem_type
  >::result
sum(const T1& x)
  {
  arma_extra_debug_sigprint();

  // sum of all elements
  return accu(x);
  }
template<typename Distribution>
void HMM<Distribution>::Forward(const arma::mat& dataSeq,
                                arma::vec& scales,
                                arma::mat& forwardProb) const
{
  // Our goal is to calculate the forward probabilities:
  //   P(X_k | o_{1:k}) for all possible states X_k, for each time point k.
  forwardProb.zeros(transition.n_rows, dataSeq.n_cols);
  scales.zeros(dataSeq.n_cols);

  // The starting state (at t = -1) is assumed to be state 0.  This is what
  // MATLAB does in its hmmdecode() function, so we will emulate that behavior.
  for (size_t state = 0; state < transition.n_rows; state++)
    forwardProb(state, 0) = transition(state, 0) *
        emission[state].Probability(dataSeq.unsafe_col(0));

  // Then normalize the column.
  scales[0] = accu(forwardProb.col(0));
  forwardProb.col(0) /= scales[0];

  // Now compute the probabilities for each successive observation.
  for (size_t t = 1; t < dataSeq.n_cols; t++)
  {
    for (size_t j = 0; j < transition.n_rows; j++)
    {
      // The forward probability of state j at time t is the sum over all
      // states of the probability of the previous state transitioning to the
      // current state and emitting the given observation.
      forwardProb(j, t) = accu(forwardProb.col(t - 1) %
          trans(transition.row(j))) *
          emission[j].Probability(dataSeq.unsafe_col(t));
    }

    // Normalize probability.
    scales[t] = accu(forwardProb.col(t));
    forwardProb.col(t) /= scales[t];
  }
}
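// Why summing log(scales) gives the log-likelihood (used by LogLikelihood()
// and Estimate() in this section): after each column is normalized, scales[t]
// equals the one-step predictive likelihood P(o_t | o_{1:t-1}), so by the
// chain rule P(o_{1:T}) = prod_t scales[t] and log P(o_{1:T}) =
// accu(log(scales)).  A minimal sketch, assuming Forward() is accessible from
// the call site (the helper name is hypothetical):
template<typename Distribution>
double forwardLogLikelihoodSketch(const HMM<Distribution>& hmm,
                                  const arma::mat& dataSeq)
{
  arma::mat forward;
  arma::vec scales;
  hmm.Forward(dataSeq, scales, forward); // fills scales[t] = P(o_t | o_{1:t-1})
  return accu(log(scales));              // log P(o_{1:T})
}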
sp_mat make_sample_basis(uint N, uint K)
{
  sp_mat basis = sp_mat(N, K);
  set<uword> keys;
  uvec samples = randi<uvec>(K, distr_param(0, N - 1));
  for (uint k = 0; k < K; k++)
  {
    // Re-draw until the sampled row has not been used by an earlier column.
    while (keys.count(samples(k)) > 0)
    {
      samples(k) = randi<uvec>(1, distr_param(0, N - 1))(0);
    }
    basis(samples(k), k) = 1;
    keys.insert(samples(k));
  }
  assert(K == accu(basis));
  return basis; // Columns are orthonormal by construction.
}
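// Sanity-check sketch for make_sample_basis(): each column has exactly one
// unit entry and no two columns share a row, so the Gram matrix
// basis.t() * basis is the K x K identity, i.e. the columns are orthonormal.
// (The function name below is hypothetical.)
void sampleBasisCheck()
{
  sp_mat basis = make_sample_basis(100, 5);
  mat gram = mat(basis.t() * basis);
  assert(approx_equal(gram, eye<mat>(5, 5), "absdiff", 1e-12));
}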
//' A speedy sampling from a multinomial distribution
//'
//' @param theta a multinomial probability vector (K x 1 vector)
//'
//' @return returns a class index from [0, K)
//'
//' @note
//' Author: Clint P. George
//'
//' Created on: February 11, 2016
//'
//' @family utils
//'
//' @export
// [[Rcpp::export]]
unsigned int sample_multinomial(arma::vec theta)
{
  unsigned int t = 0;
  double total_prob = accu(theta);
  double u = runif(1)(0) * total_prob;
  double cumulative_prob = theta(0);

  // Inverse-CDF sampling: walk the cumulative sum until it exceeds u.
  while (u > cumulative_prob)
  {
    t++;
    cumulative_prob += theta(t);
  }
  return t;
}
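// Usage sketch for sample_multinomial(): theta need not be normalized, since
// the uniform draw is scaled by accu(theta). With theta = {2, 3, 5}, indices
// 0, 1, 2 are returned with probabilities 0.2, 0.3 and 0.5. Like the sampler
// above, it relies on R's runif(), so it must run inside an R session.
// (The function name below is hypothetical.)
void sampleMultinomialExample()
{
  arma::vec theta = {2.0, 3.0, 5.0};
  unsigned int k = sample_multinomial(theta); // k is in {0, 1, 2}
}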
::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > computeSegmentAccu()
{
	uint64_t const numint = index.size();

	::libmaus2::autoarray::AutoArray<uint64_t> preaccu(numint+1);
	uint64_t k = 0;
	for ( uint64_t i = 0; i < index.size(); ++i )
		preaccu[k++] = index[i].size();
	preaccu.prefixSums();

	::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > accu(numint);
	for ( uint64_t i = 1; i < preaccu.size(); ++i )
		accu[i-1] = std::pair<uint64_t,uint64_t>(preaccu[i-1],preaccu[i]);

	return accu;
}
double Connectome::transitivity(const mat &W)
{
    /*
    Transitivity is the ratio of 'triangles to triplets' in the network
    (a classical version of the clustering coefficient).

    Input:      W       weighted undirected connection matrix

    Output:     T       transitivity scalar

    Reference: Onnela et al. (2005) Phys Rev E 71:065103
    */
    rowvec K = degree(W);
    mat t = arma::pow(W, 1.0/3.0);
    vec cyc3 = diagvec(t*t*t);
    return accu(cyc3) / sum(K % (K-1));
}
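// Worked check for transitivity(): for an unweighted triangle, the
// element-wise cube root leaves W unchanged, each diagonal entry of W^3
// counts the two closed 3-walks through that node, and every node has degree
// 2. Hence T = accu(cyc3) / sum(K % (K-1)) = 6 / 6 = 1, the maximum value.
// (The function name below is hypothetical.)
void transitivityExample()
{
    mat W = { {0, 1, 1},
              {1, 0, 1},
              {1, 1, 0} };
    // Connectome::transitivity(W) evaluates to 1.0 for this graph.
}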
template<typename MatrixType>
static inline void UpdateGradient(arma::mat& s,
                                  const arma::mat& rrt,
                                  const std::vector<MatrixType>& ais,
                                  const arma::vec& bis,
                                  const arma::vec& lambda,
                                  const size_t lambdaOffset,
                                  const double sigma)
{
  for (size_t i = 0; i < ais.size(); ++i)
  {
    const double constraint = accu(ais[i] % rrt) - bis[i];
    const double y = lambda[lambdaOffset + i] - sigma * constraint;

    s -= y * ais[i];
  }
}
template<typename T1>
inline
arma_warn_unused
typename T1::elem_type
sum
  (
  const T1& X,
  const arma_empty_class junk1 = arma_empty_class(),
  const typename enable_if< resolves_to_vector<T1>::value == true >::result* junk2 = 0
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk1);
  arma_ignore(junk2);

  return accu(X);
  }
template<typename Distribution>
double HMM<Distribution>::Estimate(const arma::mat& dataSeq,
                                   arma::mat& stateProb,
                                   arma::mat& forwardProb,
                                   arma::mat& backwardProb,
                                   arma::vec& scales) const
{
  // First run the forward-backward algorithm.
  Forward(dataSeq, scales, forwardProb);
  Backward(dataSeq, scales, backwardProb);

  // Now assemble the state probability matrix based on the forward and
  // backward probabilities.
  stateProb = forwardProb % backwardProb;

  // Finally assemble the log-likelihood and return it.
  return accu(log(scales));
}
template<typename MatrixType>
static inline void UpdateObjective(double& objective,
                                   const arma::mat& rrt,
                                   const std::vector<MatrixType>& ais,
                                   const arma::vec& bis,
                                   const arma::vec& lambda,
                                   const size_t lambdaOffset,
                                   const double sigma)
{
  for (size_t i = 0; i < ais.size(); ++i)
  {
    // The constraint value is the trace of A_i * R * R^T, minus b_i.
    const double constraint = accu(ais[i] % rrt) - bis[i];
    objective -= (lambda[lambdaOffset + i] * constraint);
    objective += (sigma / 2.) * constraint * constraint;
  }
}
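// Both helpers above exploit the identity accu(A % B) == trace(A * B.t()),
// which evaluates trace(A_i * R * R^T) without forming the full matrix
// product. A quick numerical check (sketch; the function name is
// hypothetical):
void traceIdentityCheck()
{
  arma::mat A = arma::randu<arma::mat>(4, 4);
  arma::mat B = arma::randu<arma::mat>(4, 4);
  const double lhs = arma::accu(A % B);
  const double rhs = arma::trace(A * B.t());
  // lhs == rhs up to floating-point rounding.
}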
::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > computeSymAccu()
{
	uint64_t numint = 0;
	for ( uint64_t i = 0; i < index.size(); ++i )
		numint += index[i].size();

	::libmaus2::autoarray::AutoArray<uint64_t> preaccu(numint+1);
	uint64_t k = 0;
	for ( uint64_t i = 0; i < index.size(); ++i )
		for ( uint64_t j = 0; j < index[i].size(); ++j )
			preaccu[k++] = index[i][j].vcnt;
	preaccu.prefixSums();

	::libmaus2::autoarray::AutoArray< std::pair<uint64_t,uint64_t> > accu(numint);
	for ( uint64_t i = 1; i < preaccu.size(); ++i )
		accu[i-1] = std::pair<uint64_t,uint64_t>(preaccu[i-1],preaccu[i]);

	return accu;
}
/**
 * Calculates the multivariate Gaussian probability density function for each
 * data point (column) in the given matrix, with respect to the given mean and
 * covariance.
 *
 * @param x List of observations.
 * @param mean Mean of multivariate Gaussian.
 * @param cov Covariance of multivariate Gaussian.
 * @param probabilities Output probabilities for each input observation.
 */
inline void phi(const arma::mat& x,
                const arma::vec& mean,
                const arma::mat& cov,
                arma::vec& probabilities)
{
  // Column i of 'diffs' is the difference between x.col(i) and the mean.
  arma::mat diffs = x - (mean * arma::ones<arma::rowvec>(x.n_cols));

  // Now, we only want to calculate the diagonal elements of (diffs' * cov^-1
  // * diffs).  We just don't need any of the other elements.  We can
  // calculate the right-hand part of the equation (instead of the left side)
  // so that later we are referencing columns, not rows -- that is faster.
  arma::mat rhs = -0.5 * inv(cov) * diffs;
  arma::vec exponents(diffs.n_cols); // We will now fill this.
  for (size_t i = 0; i < diffs.n_cols; i++)
    exponents(i) = exp(accu(diffs.unsafe_col(i) % rhs.unsafe_col(i)));

  probabilities = pow(2 * M_PI, (double) mean.n_elem / -2.0) *
      pow(det(cov), -0.5) * exponents;
}
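// Usage sketch for phi(): the standard bivariate Gaussian evaluated at the
// origin has density 1 / (2 * pi), roughly 0.15915. (The function name below
// is hypothetical.)
void phiExample()
{
  arma::mat x = arma::zeros<arma::mat>(2, 1); // a single observation
  arma::vec mean = arma::zeros<arma::vec>(2);
  arma::mat cov = arma::eye<arma::mat>(2, 2);
  arma::vec probs;
  phi(x, mean, cov, probs); // probs(0) is approximately 1 / (2 * M_PI)
}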
void BoxesSystem::update_state_and_particles(const mat& x_t, const mat& P_t,
                                             const mat& u_t, mat& x_tp1,
                                             mat& P_tp1)
{
	int M = P_t.n_cols;
	x_tp1 = this->dynfunc(x_t, u_t);

	// Receive a noisy measurement.
	mat z_tp1 = this->obsfunc(x_tp1, this->box_centers,
			sample_gaussian(zeros<mat>(N*R_DIM, 1), .01*this->R));

	mat W(M, 1, fill::zeros);
	mat r(N*R_DIM, 1, fill::zeros);
	// For each particle, weight by the Gaussian likelihood of that
	// measurement given the particle/agent observation.
	for (int m = 0; m < M; ++m)
	{
		mat z_particle = this->obsfunc(x_tp1, P_t.col(m), r);
		mat e = z_particle - z_tp1;
		W(m) = this->gauss_likelihood(e, this->R);
	}
	W = W / accu(W);

	double sampling_noise = uniform(0, 1/double(M));
	P_tp1 = this->low_variance_sampler(P_t, W, sampling_noise);
}
/*!
 * \brief Softmax loss: forward, and backward unless mode == 1.
 * X: [N, C, 1, 1], usually the output of an affine (fc) layer
 * Y: [N, C, 1, 1], ground truth, with 1 (true) or 0 (false)
 * \param[in]  const vector<shared_ptr<Blob>>& in   in[0]: X, in[1]: Y
 * \param[out] double& loss                         loss
 * \param[out] shared_ptr<Blob>& dout               dout: dX
 */
void SoftmaxLossLayer::go(const vector<shared_ptr<Blob>>& in,
                          double& loss,
                          shared_ptr<Blob>& dout,
                          int mode)
{
    if (dout) {
        dout.reset();
    }
    int N = in[0]->get_N();
    int C = in[0]->get_C();
    int H = in[0]->get_H();
    int W = in[0]->get_W();
    assert(H == 1 && W == 1);

    mat mat_x = in[0]->reshape();
    mat mat_y = in[1]->reshape();

    /*! forward: row-wise softmax, with the row max subtracted for stability */
    mat row_max = repmat(arma::max(mat_x, 1), 1, C);
    mat_x = arma::exp(mat_x - row_max);
    mat row_sum = repmat(arma::sum(mat_x, 1), 1, C);
    mat e = mat_x / row_sum;
    mat prob = -arma::log(e);
    /*! loss should be near -log(1/C) for an untrained net */
    loss = accu(prob % mat_y) / N;

    /*! only forward */
    if (mode == 1)
        return;

    /*! backward */
    mat dx = e - mat_y;
    dx /= N;
    mat2Blob(dx, dout, (*in[0]).size());
    return;
}
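// Numerical check of the "-log(1/C)" comment above: with uniform logits every
// class gets probability 1/C, so the cross-entropy against any one-hot label
// is log(C). A standalone sketch of the same core math, using plain Armadillo
// matrices instead of this project's Blob type (the helper name is
// hypothetical):
double softmaxLossSketch(const mat& logits, const mat& onehot)
{
    int N = logits.n_rows;
    int C = logits.n_cols;
    mat shifted = logits - repmat(arma::max(logits, 1), 1, C); // stability
    mat e = arma::exp(shifted);
    e.each_col() /= arma::sum(e, 1);         // row-wise softmax
    return accu(-arma::log(e) % onehot) / N; // mean cross-entropy
}
// e.g. with zeros<mat>(4, 10) as logits and any one-hot labels, this
// returns log(10).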
template<typename FittingType>
double GMM<FittingType>::LogLikelihood(
    const arma::mat& data,
    const std::vector<arma::vec>& meansL,
    const std::vector<arma::mat>& covariancesL,
    const arma::vec& weightsL) const
{
  double loglikelihood = 0;

  arma::vec phis;
  arma::mat likelihoods(gaussians, data.n_cols);
  for (size_t i = 0; i < gaussians; i++)
  {
    phi(data, meansL[i], covariancesL[i], phis);
    likelihoods.row(i) = weightsL(i) * trans(phis);
  }

  // Now sum over every point.
  for (size_t j = 0; j < data.n_cols; j++)
    loglikelihood += log(accu(likelihoods.col(j)));
  return loglikelihood;
}
long double TopicSearch::calc_lnTP_hybrid_multi_randomwalk(
    size_t num_words,
    vector<size_t> word_indices,
    uvec Z_prime,
    double random_walk_prob,
    double multi_jump_prob)
{
  long double prob = 0.0;
  long double multi_jump = 1.0;
  vec beta_w;

  for (size_t i = 0; i < num_words; i++)
  {
    beta_w = this->beta_counts_.col(this->word_ids_(word_indices[i]));
    multi_jump *= beta_w(Z_prime(i)) / (accu(beta_w) + 1);
  }

  prob = random_walk_prob * multi_jump_prob * multi_jump
       + random_walk_prob * (1.0 - multi_jump_prob) * (long double) num_words
       + (1.0 - random_walk_prob) * num_words / (long double) this->num_topics_;

  return log(prob + 1e-24);
}
// Add penalty terms to the target error 'terr'.
void add_penalty(const unsigned int & i_e,
                 vec & terr,
                 const mat & W,
                 const mat & H,
                 const unsigned int & N_non_missing,
                 const vec & alpha,
                 const vec & beta)
{
	// Add the penalty terms back to the loss function (terr).
	if (alpha(0) != alpha(1))
		terr(i_e) += 0.5*(alpha(0)-alpha(1))*accu(square(W))/N_non_missing;
	if (beta(0) != beta(1))
		terr(i_e) += 0.5*(beta(0)-beta(1))*accu(square(H))/N_non_missing;
	if (alpha(1) != 0)
		terr(i_e) += 0.5*alpha(1)*accu(W*W.t())/N_non_missing;
	if (beta(1) != 0)
		terr(i_e) += 0.5*beta(1)*accu(H*H.t())/N_non_missing;
	if (alpha(2) != 0)
		terr(i_e) += alpha(2)*accu(W)/N_non_missing;
	if (beta(2) != 0)
		terr(i_e) += beta(2)*accu(H)/N_non_missing;
}
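// The split between the alpha(0) and alpha(1) terms above is algebraic:
//   0.5*(a0 - a1)*accu(square(W)) + 0.5*a1*accu(W*W.t())
// equals 0.5*a0*||W||_F^2 + 0.5*a1*(accu(W*W.t()) - ||W||_F^2), because
// accu(square(W)) is exactly the diagonal sum (trace) of W*W.t(). So a0
// weights the diagonal of W*W.t() and a1 its off-diagonal entries. A quick
// check (sketch; the function name is hypothetical):
void penaltySplitCheck()
{
	mat W = randu<mat>(6, 3);
	double a0 = 0.7, a1 = 0.2;
	double split  = 0.5*(a0 - a1)*accu(square(W)) + 0.5*a1*accu(W*W.t());
	double direct = 0.5*a0*accu(square(W))
	              + 0.5*a1*(accu(W*W.t()) - accu(square(W)));
	// split == direct up to floating-point rounding.
}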
/*
 * This function calculates the partition probability for
 * a document.
 *
 * Ref: LDA product partition model by George Casella
 */
double TopicSearch::calc_ln_partition_probality(
    vector<size_t> word_indices,
    uvec Z)
{
  double partition_probability = 0.0;
  mat beta_counts = zeros<mat>(this->num_topics_, this->vocabulary_size_);

  // Calculate the m_ji' s.
  for (size_t n = 0; n < word_indices.size(); n++)
    beta_counts(Z(n), this->word_ids_(word_indices[n])) += 1;

  // Calculate partition counts from the m_ji' s; i' = 1 ... V.
  vec partition_counts = sum(beta_counts, 1); // sums over rows

  // ln_gamma(n_j + alpha_j)
  vec ln_gamma_j = log_gamma_vec(partition_counts + this->alpha_vec_);

  // ln a_j = \sum_i' (m_ji' * ln beta_ji')
  vec ln_a_j = sum(beta_counts % this->ln_init_beta_sample_, 1); // sums over rows, i.e. over the i' s

  partition_probability = accu(ln_gamma_j + ln_a_j); // sum over all j s - ln_gamma_K
  return partition_probability;
}
/*!
 * \brief convolutional layer forward
 * X:      [N, C, Hx, Wx]
 * weight: [F, C, Hw, Ww]
 * bias:   [F, 1, 1, 1]
 * out:    [N, F, (Hx+pad*2-Hw)/stride+1, (Wx+pad*2-Ww)/stride+1]
 * \param[in]  const vector<shared_ptr<Blob>>& in   in[0]: X, in[1]: weights, in[2]: bias
 * \param[in]  Param& param                         conv params: stride, pad
 * \param[out] shared_ptr<Blob>& out                Y
 */
void ConvLayer::forward(const vector<shared_ptr<Blob>>& in,
                        shared_ptr<Blob>& out,
                        Param& param)
{
    if (out) {
        out.reset();
    }
    assert(in[0]->get_C() == in[1]->get_C());
    int N = in[0]->get_N();
    int F = in[1]->get_N();
    int C = in[0]->get_C();
    int Hx = in[0]->get_H();
    int Wx = in[0]->get_W();
    int Hw = in[1]->get_H();
    int Ww = in[1]->get_W();

    // Calculate the output size Hy, Wy.
    int Hy = (Hx + param.conv_pad*2 - Hw) / param.conv_stride + 1;
    int Wy = (Wx + param.conv_pad*2 - Ww) / param.conv_stride + 1;

    out.reset(new Blob(N, F, Hy, Wy));
    Blob padX = (*in[0]).pad(param.conv_pad);

    // Each output cell is the dot product of a padded input window and the
    // filter, plus the filter's bias.
    for (int n = 0; n < N; ++n) {
        for (int f = 0; f < F; ++f) {
            for (int hh = 0; hh < Hy; ++hh) {
                for (int ww = 0; ww < Wy; ++ww) {
                    cube window = padX[n](span(hh * param.conv_stride, hh * param.conv_stride + Hw - 1),
                                          span(ww * param.conv_stride, ww * param.conv_stride + Ww - 1),
                                          span::all);
                    (*out)[n](hh, ww, f) = accu(window % (*in[1])[f]) + as_scalar((*in[2])[f]);
                }
            }
        }
    }
    return;
}
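// Shape check for the Hy/Wy formula above: a 32x32 input with a 3x3 kernel,
// pad = 1 and stride = 1 keeps the spatial size, while a larger stride
// shrinks it. A small sketch (the helper name is hypothetical):
int convOutputSize(int x, int pad, int k, int stride)
{
    // e.g. convOutputSize(32, 1, 3, 1) == 32, convOutputSize(32, 0, 3, 2) == 15.
    return (x + 2*pad - k) / stride + 1;
}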
// Calculation of BICreg.
List Vect::bicReggen(vector<int> vectH, vector<int> vectY, int numr)
{
    double reg = 0.0, sign, val;

    // Here, H is the response matrix.
    mat H = Vect::const_matrix(vectH);
    int n = H.n_rows, v = H.n_cols;

    // Construction of the matrix X.
    int a;
    if (vectY.empty())
        a = 0;
    else
        a = vectY.size();
    mat X(n, a+1);
    if (vectY.empty())
        X.col(0) = ones<colvec>(n);
    else {
        mat Y = Vect::const_matrix(vectY);
        Y.insert_cols(0, ones<colvec>(n));
        X = Y;
    }

    // Parameter estimation.
    mat XtX = X.t() * X;
    mat B = inv_sympd(XtX) * X.t() * H;
    //mat B = pinv(XtX) * X.t() * H;
    double lambda;
    mat A = X*B;

    if (numr == 3) // (r = [LC])
    {
        mat Omega = (1.0/n)*(H.t()*(H-A));
        Omega = 2*M_PI*Omega;
        log_det(val, sign, Omega);
        // log_det() already returns the log of |det|; for positive-definite
        // Omega the sign is +1, so val is the log-determinant directly (the
        // original log(sign*exp(val)) round-trip risks overflow).
        double det = val;
        lambda = ((a+1)*v) + (0.5*v*(v+1));
        reg = (-n*det) - (n*v) - (lambda*log(n));
    }
    if (numr == 2) // (r = [LB])
    {
        mat H_A = (1.0/n)*(H - A)%(H - A);
        rowvec sigma2 = sum(H_A, 0);
        sigma2 = log(sigma2);
        lambda = (v*(a+1)) + v;
        reg = -(n*v*log(2*M_PI)) - (n*sum(sigma2)) - (n*v) - (lambda*log(n));
    }
    if (numr == 1) // (r = [LI])
    {
        mat Aux = H - A;
        double sigma = (1.0/(n*v)) * accu(Aux % Aux);
        lambda = (v*(a+1)) + 1;
        reg = -(n*v*log(2*M_PI*sigma)) - (n*v) - (lambda*log(n));
    }
    return List::create(Named("bicvalue") = reg, Named("B") = B);
} // end Vect::bicReggen