/**
 * Estimate the Gaussian distribution directly from the given observations.
 *
 * @param observations List of observations.
 */
void GaussianDistribution::Estimate(const arma::mat& observations)
{
  if (observations.n_cols > 0)
  {
    mean.zeros(observations.n_rows);
    covariance.zeros(observations.n_rows, observations.n_rows);
  }
  else // This will end up just being empty.
  {
    mean.zeros(0);
    covariance.zeros(0);
    return;
  }

  // Calculate the mean.
  for (size_t i = 0; i < observations.n_cols; i++)
    mean += observations.col(i);

  // Normalize the mean.
  mean /= observations.n_cols;

  // Now calculate the covariance.
  for (size_t i = 0; i < observations.n_cols; i++)
  {
    arma::vec obsNoMean = observations.col(i) - mean;
    covariance += obsNoMean * trans(obsNoMean);
  }

  // Finish estimating the covariance by normalizing, with the (1 / (n - 1)) so
  // that it is the unbiased estimator.
  covariance /= (observations.n_cols - 1);

  // Ensure that the covariance is positive definite.
  if (det(covariance) <= 1e-50)
  {
    Log::Debug << "GaussianDistribution::Estimate(): Covariance matrix is not "
        << "positive definite. Adding perturbation." << std::endl;

    double perturbation = 1e-30;
    while (det(covariance) <= 1e-50)
    {
      covariance.diag() += perturbation;
      perturbation *= 10; // Slow, but we don't want to add too much.
    }
  }
}
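// A minimal, self-contained sketch of the same estimator using only Armadillo
// (the GaussianDistribution class above is mlpack-specific): column-wise
// observations, the sample mean, the unbiased 1/(n - 1) covariance, and the
// same growing diagonal perturbation while the determinant stays tiny.  The
// function name and the early return for fewer than two observations are
// illustrative assumptions, not part of the original API.
#include <armadillo>

void EstimateGaussianSketch(const arma::mat& observations,
                            arma::vec& mean,
                            arma::mat& covariance)
{
  const arma::uword n = observations.n_cols;
  if (n < 2) { mean.reset(); covariance.reset(); return; } // nothing to estimate.

  mean = arma::mean(observations, 1);            // mean over columns.
  arma::mat centered = observations;
  centered.each_col() -= mean;
  covariance = centered * centered.t() / double(n - 1);

  double perturbation = 1e-30;
  while (arma::det(covariance) <= 1e-50)         // force positive definiteness.
  {
    covariance.diag() += perturbation;
    perturbation *= 10;
  }
}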
// [[Rcpp::export]]
List ZupdateLSM(arma::mat Y, arma::mat Z, double Intercept, int dd, int nn,
                arma::mat var, arma::vec llikOld, arma::vec acc, arma::vec tune)
{
  arma::vec Zsm(dd);
  arma::vec Znewsm(dd);
  arma::vec ZsmPrev(dd);
  double prior;
  double priorNew;
  double llikNew;
  double logratio;

  arma::mat Znew(&Z[0], nn, dd);
  arma::mat Znewt = Znew.t(); // matrix to store updated values
  arma::mat Ztt = Z.t();

  // Update for t = 1: update Z_t by row.
  for (int i = 0; i < nn; i++) {
    Zsm = Ztt.col(i);
    ZsmPrev = arma::zeros<arma::vec>(dd);

    // Prior density at the current values.
    prior = logdmvnorm(Zsm, ZsmPrev, var, dd);

    // Propose a new vector.
    for (int ww = 0; ww < dd; ww++) {
      Znewsm(ww) = Zsm(ww) + tune(i) * rnorm(1, 0.0, 1.0)[0];
    }
    Znewt.col(i) = Znewsm;

    // Prior density and likelihood at the proposed values.
    priorNew = logdmvnorm(Znewsm, ZsmPrev, var, dd);
    llikNew = likelihoodi((i + 1), dd, nn, Y, Znewt.t(), Intercept);

    // Compute the log acceptance ratio.
    double ll = llikOld(i);
    logratio = llikNew - ll + priorNew - prior;

    if (log(runif(1, 0.0, 1.0)[0]) < logratio) {
      Ztt.col(i) = Znewsm;   // move to the proposed values
      acc(i) = acc(i) + 1;   // update the acceptance count
      llikOld(i) = llikNew;  // update the likelihood
    } else {
      Znewt.col(i) = Zsm;    // otherwise stay at the current state
    }
  }

  Z = Ztt.t();
  return List::create(_["Z"] = Z,
                      _["acc"] = acc,
                      _["llikOld"] = llikOld);
}
static arma::vec s_recall(arma::mat &runMatrix, Qrels &qrels, int rank)
{
  // Use a greedy approach to find the maximum number of subtopics at rank k.

  // Initialize working variables.
  int numOfSubtopics = int(*qrels.subtopics.rbegin());
  int numOfRows = min(rank, qrels.numOfRelDocs);
  arma::vec maxSubtopics = arma::zeros(rank);
  arma::rowvec seenSubtopics = arma::zeros<arma::rowvec>(numOfSubtopics);
  set<int> idealRankList;
  set<int>::iterator it;

  // The greedy approach searches for the best document at each rank.
  for (int i = 0; i < numOfRows; i++) {
    int maxNewSubtopics = 0;
    int pickedDoc = -1;

    // Iterate through the set of relevant documents to find the best document.
    for (int j = 0; j < (int) qrels.matrix.n_rows; j++) {
      // Ignore documents already picked.
      it = idealRankList.find(j);
      if (it != idealRankList.end())
        continue;

      // Compute the number of subtopics covered if this document is added.
      // (Compare against a row vector so the dimensions match.)
      int numOfNewSubtopics = arma::sum((qrels.matrix.row(j) + seenSubtopics) >
          arma::zeros<arma::rowvec>(numOfSubtopics));

      // Keep track of the best document / maximum number of subtopics.
      if (numOfNewSubtopics > maxNewSubtopics) {
        maxNewSubtopics = numOfNewSubtopics;
        pickedDoc = j;
      }
    }

    // Add the best document to the ideal rank list and keep track of the
    // subtopics seen so far.
    seenSubtopics += qrels.matrix.row(pickedDoc);
    idealRankList.insert(pickedDoc);
    maxSubtopics(i) = arma::sum(seenSubtopics >
        arma::zeros<arma::rowvec>(numOfSubtopics));
  }

  if (numOfRows < rank) {
    for (int i = numOfRows; i < rank; i++)
      maxSubtopics(i) = maxSubtopics(numOfRows - 1);
  }

  // Consider only the top 'rank' documents.
  arma::vec s_recall = arma::zeros(rank);
  for (int i = 0; i < rank; i++) {
    arma::mat matrix = runMatrix.rows(0, i);
    double retSubtopics = arma::sum(arma::sum(matrix) >
        arma::zeros<arma::rowvec>(numOfSubtopics));
    s_recall(i) = retSubtopics / maxSubtopics(i);
  }
  return s_recall;
}
double HMM::forwardProcedure(const arma::mat& data)
{
  if (N_ != BModels_.size())
    throw std::logic_error("The number of mixture models doesn't match the number of states");

  // Initialisation.
  for (unsigned int i = 0; i < N_; ++i)
  {
    alpha_(i, 0) = pi_[i] * BModels_[i].getProb(data.col(0));
  }
  c_(0) = arma::accu(alpha_.col(0));
  alpha_.col(0) /= arma::as_scalar(c_(0));

  // Iteration.
  for (unsigned int t = 1; t < T_; ++t)
  {
    for (unsigned int j = 0; j < N_; ++j)
    {
      alpha_(j, t) = arma::as_scalar(A_.col(j).t() * alpha_.col(t - 1)) *
          BModels_[j].getProb(data.col(t));
    }
    c_(t) = arma::accu(alpha_.col(t));
    alpha_.col(t) /= arma::as_scalar(c_(t));
  }

  pprob_ = arma::accu(arma::log(c_));
  return pprob_;
}
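// A standalone sketch of the same scaled forward recursion, with the emission
// probabilities precomputed into an N x T matrix B (a stand-in for the
// BModels_[j].getProb(...) calls above; that layout is an assumption made for
// this example).  alpha_t is proportional to (A^T alpha_{t-1}) .* b_t, and the
// log-likelihood is the sum of the log scaling factors.
#include <armadillo>

double ForwardLogLikelihood(const arma::mat& A,   // N x N transition matrix.
                            const arma::vec& pi,  // N initial probabilities.
                            const arma::mat& B)   // N x T emission probabilities.
{
  const arma::uword N = A.n_rows, T = B.n_cols;
  arma::mat alpha(N, T);
  arma::vec c(T);

  alpha.col(0) = pi % B.col(0);
  c(0) = arma::accu(alpha.col(0));
  alpha.col(0) /= c(0);

  for (arma::uword t = 1; t < T; ++t)
  {
    alpha.col(t) = (A.t() * alpha.col(t - 1)) % B.col(t);
    c(t) = arma::accu(alpha.col(t));
    alpha.col(t) /= c(t);
  }

  return arma::accu(arma::log(c));  // scaled log-likelihood.
}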
// [[Rcpp::export]]
double jag_fun(const double Eps, const double Tau, const arma::vec k,
               const arma::mat& Y, const arma::vec snp, const arma::mat& R)
{
  const double nobs = Y.n_cols;
  const double kmax = R.n_cols;
  double Ub = 0.0, meanB = 0.0, meanG = 0.0;
  arma::mat Ugam(kmax, nobs, arma::fill::zeros);
  arma::mat varGMAT(kmax, kmax, arma::fill::zeros);
  arma::rowvec Ysum = sum(Y, 0);

  for (int i = 0; i < nobs; i++) {
    double G = snp[i] - mean(snp);
    double V1 = (Eps + (k[i] - 1) * Tau) / ((Eps * Eps) + k[i] * Eps * Tau);
    double V2 = -((Tau) / (Eps * Eps + k[i] * Eps * Tau));

    arma::mat V(kmax, kmax);
    V.fill(V2);
    arma::vec V1_vec = rep(V1, kmax);
    V.diag() = V1_vec;

    arma::rowvec R_vec = R.row(i);
    arma::mat RR = trans(R.row(i)) * R.row(i);
    varGMAT += G * G * (RR % V);

    arma::colvec Yhat = Y.col(i);
    Ugam.col(i) = (trans(R_vec) * G) % ((V1 * Yhat) + (V2 * (Ysum[i] - Yhat)));

    Ub += G * (V1 + (k[i] - 1) * V2) * Ysum[i];
    meanB += G * G * (V1 + (k[i] - 1) * V2) * k[i];
    meanG += k[i] * G * G * V1;
  }

  const double U2beta = Ub * Ub;
  const double Ugamma = 0.5 * accu(sum(Ugam, 1) % sum(Ugam, 1));
  const double varB = 2 * meanB * meanB;
  meanG = 0.5 * meanG;
  const double varG = 0.5 * accu(varGMAT % varGMAT);
  const double cov = accu(sum(varGMAT, 0) % sum(varGMAT, 0));

  const double agam = (varB - cov) / (varB + varG - 2 * cov);
  const double abeta = (varG - cov) / (varB + varG - 2 * cov);

  const double Upsi = abeta * U2beta + agam * Ugamma;
  const double meanPSI = (abeta * meanB) + (agam * meanG);
  const double varPSI = (abeta * varB * abeta) + (agam * agam * varG);

  const double a1 = varPSI / (2 * meanPSI);
  const double a2 = (2 * meanPSI * meanPSI) / varPSI;
  const double scaled_upsi = Upsi / a1;

  double jag_pval = 1 - R::pchisq(scaled_upsi, a2, 1, 0);
  if (jag_pval == 0)
    jag_pval = 2e-16;
  return jag_pval;
}
void print(std::string header = "")
{
  A_.print("A");
  for (size_t i = 0; i < BModels_.size(); ++i)
  {
    BModels_[i].print("B");
  }
  pi_.print("pi");
}
arma::mat MinimizerBase::findPositionDeviations(const arma::mat& simPos,
                                                const arma::mat& expPos) const
{
  // ASSUMPTION: matrices must be sorted in increasing Z order.
  // Assume also that the matrices are structured as: (x, y, z, ...)
  arma::vec xInterp;
  arma::vec yInterp;

  arma::interp1(simPos.col(2), simPos.col(0), expPos.col(2), xInterp);
  arma::interp1(simPos.col(2), simPos.col(1), expPos.col(2), yInterp);

  arma::mat result(xInterp.n_rows, 2);
  result.col(0) = (xInterp - expPos.col(0)) / posChi2Norm;
  result.col(1) = (yInterp - expPos.col(1)) / posChi2Norm;

  return result;
}
std::vector<std::vector<float>> signatureWindows(arma::mat& path, int logSigDepth,
                                                 int windowSize)
{
  std::vector<std::vector<float>> sW(path.n_rows);
  std::vector<float> repPath;

  // Reparameterize the 2-D path as (t, x, y) triples.
  for (size_t i = 0; i < path.n_rows; ++i) {
    repPath.push_back(i);
    repPath.push_back(path(i, 0));
    repPath.push_back(path(i, 1));
  }

  // Compute the log-signature of a sliding window centred on each point.
  // (Loop over rows, not path.size(): size() counts every matrix element and
  // would run past the end of sW.)
  for (int i = 0; i < (int) path.n_rows; ++i) {
    sW[i].resize(logsigdim(3, logSigDepth));
    int first = std::max(0, i - windowSize);
    int last = std::min((int) path.n_rows - 1, i + windowSize);
    logSignature(&repPath[3 * first], last - first + 1, 3, logSigDepth, &sW[i][0]);
  }
  return sW;
}
// objective -------------------------------------------------------------------
// @title Objective function to minimize
// @description This is the sum of squared errors at observed entries between
//   the ratings matrix and its approximation by latent factors and covariates.
// @param X The ratings matrix. Unobserved entries must be marked NA. Users
//   must be along rows, and tracks must be along columns.
// @param Z The covariates associated with each pair. This must be shaped as an
//   array with users along rows, tracks along columns, and features along
//   slices.
// @param P The learned user latent factors.
// @param Q The learned track latent factors.
// @param beta The learned regression coefficients.
// @param lambdas The regularization parameters for P, Q, and beta, respectively.
// @return The value of the objective function given the current parameter
//   values.
double objective_fun(arma::mat X, arma::cube Z, arma::mat P, arma::mat Q,
                     arma::vec beta, Rcpp::NumericVector lambdas)
{
  arma::uvec obs_ix = arma::conv_to<arma::uvec>::from(arma::find_finite(X));
  arma::mat resid = X - P * Q.t() - cube_multiply(Z, beta);
  return arma::sum(arma::pow(resid(obs_ix), 2)) +
         lambdas[0] * arma::accu(arma::pow(P, 2)) +
         lambdas[1] * arma::accu(arma::pow(Q, 2)) +
         lambdas[2] * arma::accu(arma::pow(beta, 2));
}
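// A self-contained sketch of the same objective with plain Armadillo types.
// cube_multiply() is not shown above, so a plausible stand-in is included
// here: it contracts the covariate cube Z with beta along the feature slices.
// Both helper names are assumptions made for this example, not the package's
// own functions.
#include <armadillo>

arma::mat cube_multiply_sketch(const arma::cube& Z, const arma::vec& beta)
{
  // Sum of beta-weighted slices: the covariate contribution to each (i, j).
  arma::mat out(Z.n_rows, Z.n_cols, arma::fill::zeros);
  for (arma::uword k = 0; k < Z.n_slices; ++k)
    out += beta(k) * Z.slice(k);
  return out;
}

double objective_sketch(const arma::mat& X, const arma::cube& Z,
                        const arma::mat& P, const arma::mat& Q,
                        const arma::vec& beta, const arma::vec& lambdas)
{
  // Squared error on observed (finite) entries plus ridge penalties.
  arma::uvec obs_ix = arma::find_finite(X);
  arma::mat resid = X - P * Q.t() - cube_multiply_sketch(Z, beta);
  arma::vec obs_resid = resid(obs_ix);
  return arma::accu(arma::square(obs_resid)) +
         lambdas(0) * arma::accu(arma::square(P)) +
         lambdas(1) * arma::accu(arma::square(Q)) +
         lambdas(2) * arma::accu(arma::square(beta));
}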
//[[Rcpp::export]]
double computeSStat_cpp(arma::colvec beta, arma::mat invS, arma::colvec y,
                        arma::mat X, int j, int m)
{
  mat newX = X.rows(j - 1, j + m - 2);
  colvec newY = y.rows(j - 1, j + m - 2);

  mat A = trans(newX) * invS * (newY - newX * beta);
  mat V = trans(newX) * invS * newX;
  mat S = trans(A) * inv(V) * A;

  double s = S(0, 0);
  return s;
}
void MaximalInputs(const arma::mat& parameters, arma::mat& output)
{
  arma::mat paramTemp(parameters.submat(0, 0, (parameters.n_rows - 1) / 2 - 1,
                                        parameters.n_cols - 2).t());
  double const mean = arma::mean(arma::mean(paramTemp));
  paramTemp -= mean;

  NormalizeColByMax(paramTemp, output);
}
/*
 * Computes the H matrix (used in computation of the LR statistic).
 */
arma::mat compute_H(const arma::mat& V, const std::vector<int>& groups,
                    int num_groups)
{
  arma::mat H(num_groups, num_groups, arma::fill::zeros);

  for (size_t k = 0; k < groups.size(); k++) {
    int group_k = groups[k];
    for (size_t l = k; l < groups.size(); l++) {
      int group_l = groups[l];
      double dot = arma::dot(V.col(k), V.col(l));
      double dot_sq = dot * dot;
      H.col(group_l)[group_k] += dot_sq;
      if (group_k != group_l) {
        H.col(group_k)[group_l] += dot_sq;
      }
    }
  }
  return H;
}
void RNN<
    LayerTypes, OutputLayerType, InitializationRuleType, PerformanceFunction
>::Predict(arma::mat& predictors, arma::mat& responses)
{
  arma::mat responsesTemp;
  SinglePredict(arma::mat(predictors.colptr(0), predictors.n_rows, 1, false,
      true), responsesTemp);

  responses = arma::mat(responsesTemp.n_elem, predictors.n_cols);
  responses.col(0) = responsesTemp.col(0);

  for (size_t i = 1; i < predictors.n_cols; i++)
  {
    SinglePredict(arma::mat(predictors.colptr(i), predictors.n_rows, 1, false,
        true), responsesTemp);
    responses.col(i) = responsesTemp.col(0);
  }
}
MocapStream::Frame MocapStream::createFrame(arma::mat m, const std::set<int>& allowedIDs)
{
  Frame f;

  for (unsigned int n = 0; n < m.n_cols; n++) {
    arma::vec data = m.col(n);

    RigidBody r;

    arma::vec3 pos = data.rows(1, 3);
    Rotation3D rot;
    int start = 4;
    for (int i = 0; i < 3; i++) {
      rot.row(i) = data.rows(start + 3 * i, start + 3 * i + 2).t();
    }

    // Change back to mocap coords from nubots coords.
    if (correctForAutocalibrationCoordinateSystem) {
      r.pose.translation() = arma::vec3{-pos[1], pos[2], -pos[0]};
    } else {
      r.pose.translation() = pos;
    }

    if (correctForAutocalibrationCoordinateSystem) {
      UnitQuaternion q(rot);
      // Change back to mocap coords from nubots coords.
      UnitQuaternion q_(arma::vec4{
        q.kX(),
        -q.kZ(),
        q.kW(),
        -q.kY(),
      });
      // Turn back into a rotation.
      r.pose.rotation() = Rotation3D(q_);
    } else {
      r.pose.rotation() = rot;
    }

    if (LHInput) {
      transformLHtoRH(r.pose);
    }

    if (!r.pose.is_finite()) {
      std::cout << __FILE__ << " - " << __LINE__
                << " Warning: nan data not loaded for RB " << int(data[0]) << std::endl;
      continue;
    }

    // An empty allowedIDs set means any ID is allowed.
    if (allowedIDs.empty() || allowedIDs.count(int(data[0])) > 0) {
      f.rigidBodies[int(data[0])] = r;
    }
  }
  return f;
}
arma::vec Vespucci::Math::Quantification::CorrelationMat(const arma::mat& X,
                                                         arma::vec control)
{
  arma::vec results;
  results.set_size(X.n_cols);
  for (arma::uword i = 0; i < X.n_cols; ++i) {
    results(i) = arma::as_scalar(cor(control, X.col(i)));
  }
  return results;
}
///
/// \brief Vespucci::Math::DimensionReduction::EstimateAdditiveNoise
/// \param noise
/// \param noise_correlation
/// \param sample
/// The slow additive noise estimator from the HySime paper. I'm looking into
/// faster ones.
void Vespucci::Math::DimensionReduction::EstimateAdditiveNoise(arma::mat& noise,
                                                               arma::mat& noise_correlation,
                                                               const arma::mat& sample)
{
  double small = 1e-6;
  noise = arma::zeros(sample.n_rows, sample.n_cols);
  arma::mat RR = sample * sample.t();
  arma::mat RRi = arma::inv(RR + small * arma::eye(sample.n_rows, sample.n_rows));
  arma::mat XX, RRa, beta;
  for (arma::uword i = 0; i < sample.n_rows; ++i) {
    XX = RRi - (RRi.col(i) * RRi.row(i)) / RRi(i, i);
    RRa = RR.col(i);
    RRa(i) = 0;
    beta = XX * RRa;
    beta(i) = 0;
    noise.row(i) = sample.row(i) - beta.t() * sample;
  }
  arma::mat nn = noise * noise.t() / sample.n_rows;
  noise_correlation = arma::diagmat(nn.diag());
}
bool subspaceIdMoor::simple_dlyap(arma::mat const& A, arma::mat const& Q, arma::mat& X)
{
  mat kronProd = kron(A, A);
  mat I = eye(kronProd.n_rows, kronProd.n_cols);
  bool slvflg = solve(X, I - kronProd, vectorise(Q));

  // Reshape the solution vector back into a matrix.
  X.reshape(A.n_rows, A.n_rows);
  return slvflg;
}
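// Usage sketch for the solver above: the discrete Lyapunov equation
// X = A X A^T + Q vectorizes to (I - A kron A) vec(X) = vec(Q), which is what
// simple_dlyap() solves before reshaping.  The matrices below are illustrative
// test values; the residual printed at the end should be ~0 for a stable A.
#include <armadillo>

void dlyap_sketch()
{
  arma::mat A = {{0.5, 0.1}, {0.0, 0.3}};   // eigenvalues inside the unit circle.
  arma::mat Q = arma::eye(2, 2);

  arma::mat kronProd = arma::kron(A, A);
  arma::mat I = arma::eye(kronProd.n_rows, kronProd.n_cols);
  arma::vec x;
  arma::solve(x, I - kronProd, arma::vectorise(Q));

  arma::mat X = arma::reshape(x, A.n_rows, A.n_rows);
  arma::mat residual = X - A * X * A.t() - Q;
  residual.print("dlyap residual");          // ~0 on success.
}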
void CosineTreeBuilder::LSSampling(arma::mat A, arma::vec& probability)
{
  Log::Info << "LSSampling" << std::endl;

  // Save the Frobenius norm of the matrix.
  double normA = arma::norm(A, "fro");

  // Calculate the probability of each point being sampled.
  for (size_t i = 0; i < A.n_rows; i++)
    probability(i) = arma::norm(A.row(i), "fro") / normA;
}
/**
 * The update function that actually updates the H matrix. The function takes
 * in all the matrices and only changes the value of the H matrix.
 *
 * @param V Input matrix to be factorized.
 * @param W Basis matrix.
 * @param H Encoding matrix to be updated.
 */
inline static void Update(const arma::mat& V,
                          const arma::mat& W,
                          arma::mat& H)
{
  // Simple implementation left in the header file.
  arma::mat t1;
  arma::colvec t2;

  t1 = W * H;
  for (size_t i = 0; i < H.n_rows; i++)
  {
    for (size_t j = 0; j < H.n_cols; j++)
    {
      t2 = W.col(i) % V.col(j) / t1.col(j);
      H(i, j) = H(i, j) * sum(t2) / sum(W.col(i));
    }
  }
}
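// For reference, the element-wise loop above should agree with the compact
// matrix form of the multiplicative H update for the generalized KL-divergence
// NMF objective: H <- H .* (W' (V ./ WH)) ./ (column sums of W).  This sketch
// is an illustrative restatement in plain Armadillo, not the library's own
// implementation, and the function name is an assumption.
#include <armadillo>

inline void KLDivergenceHUpdateSketch(const arma::mat& V,
                                      const arma::mat& W,
                                      arma::mat& H)
{
  arma::mat ratio = W.t() * (V / (W * H));               // W' (V ./ WH)
  ratio.each_col() /= arma::vectorise(arma::sum(W, 0));  // divide row i by sum(W.col(i))
  H %= ratio;
}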
/**
 * The update rule for the encoding matrix H. The function takes in all the
 * matrices and only changes the value of the H matrix.
 *
 * @param V Input matrix to be factorized.
 * @param W Basis matrix.
 * @param H Encoding matrix to be updated.
 */
inline void HUpdate(const arma::sp_mat& V,
                    const arma::mat& W,
                    arma::mat& H)
{
  // V is unused here: the current nonzero entry is read through the class
  // member iterator 'it'.
  (void) V;

  arma::mat deltaH(H.n_rows, 1);
  deltaH.zeros();

  size_t currentUserIndex = it->col();
  size_t currentItemIndex = it->row();

  deltaH += (**it - arma::dot(W.row(currentItemIndex), H.col(currentUserIndex))) *
      arma::trans(W.row(currentItemIndex));

  if (kh != 0)
    deltaH -= kh * H.col(currentUserIndex);

  H.col(currentUserIndex) += u * deltaH;
}
/**
 * The update function that actually updates the W matrix. The function takes
 * in all the matrices and only changes the value of the W matrix.
 *
 * @param V Input matrix to be factorized.
 * @param W Basis matrix to be updated.
 * @param H Encoding matrix.
 */
inline static void Update(const arma::mat& V,
                          arma::mat& W,
                          const arma::mat& H)
{
  // Simple implementation left in the header file.
  arma::mat t1;
  arma::rowvec t2;

  t1 = W * H;
  for (size_t i = 0; i < W.n_rows; ++i)
  {
    for (size_t j = 0; j < W.n_cols; ++j)
    {
      t2 = H.row(j) % V.row(i) / t1.row(i);
      W(i, j) = W(i, j) * sum(t2) / sum(H.row(j));
    }
  }
}
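// The matching compact form of the W update above (again the generalized
// KL-divergence multiplicative rule): W <- W .* ((V ./ WH) H') ./ (row sums of
// H).  As with the H sketch, this is an illustrative restatement in plain
// Armadillo rather than the original implementation.
#include <armadillo>

inline void KLDivergenceWUpdateSketch(const arma::mat& V,
                                      arma::mat& W,
                                      const arma::mat& H)
{
  arma::mat ratio = (V / (W * H)) * H.t();   // (V ./ WH) H'
  ratio.each_row() /= arma::sum(H, 1).t();   // divide column j by sum(H.row(j))
  W %= ratio;
}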
// The step size should be chosen carefully to guarantee convergence given a
// reasonable number of computations.
int GradientDescent::RunGradientDescent(const DataNormalized& data)
{
  assert(num_iters_ >= 1);

  const int kNumTrainEx = data.num_train_ex();
  assert(kNumTrainEx >= 1);

  // For this exercise, there should be at least two training features.
  const int kNumFeatures = data.num_features();
  assert(kNumFeatures >= 2);

  const arma::mat kTrainingFeaturesNormalized =
      data.training_features_normalized();
  const arma::vec kTrainingLabels = data.training_labels();

  double *j_theta_array = new double[num_iters_];

  // Recall that we are trying to minimize the following cost function:
  //   ((training features) * (current weights) - (training labels))^2
  // Each iteration of this algorithm updates the weights as a scaled version
  // of the gradient of this cost function, taken with respect to the weights.
  // Thus, each iteration of gradient descent performs the following:
  //   (update) = (step size) * ((training features) * (current weights) -
  //       (training labels)) * (training features)
  //   (new weights) = (current weights) - (update)
  for (int theta_index = 0; theta_index < num_iters_; theta_index++) {
    const arma::vec kDiffVec =
        kTrainingFeaturesNormalized * theta_ - kTrainingLabels;
    arma::mat kDiffVecTimesTrainFeat =
        kDiffVec % kTrainingFeaturesNormalized.col(0);
    for (int feature_index = 1; feature_index <= kNumFeatures; feature_index++) {
      kDiffVecTimesTrainFeat = join_rows(kDiffVecTimesTrainFeat,
          kDiffVec % kTrainingFeaturesNormalized.col(feature_index));
    }
    const arma::vec kThetaNew = theta_ - alpha_ * (1 / (float) kNumTrainEx) *
        (sum(kDiffVecTimesTrainFeat)).t();
    j_theta_array[theta_index] = ComputeCost(data);
    set_theta(kThetaNew);
  }

  delete[] j_theta_array;

  return 0;
}
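// The per-feature join_rows construction above can be collapsed into a single
// vectorized step.  A minimal sketch of the same batch update, assuming the
// intercept column is already part of X (the function and parameter names here
// are illustrative):
//   theta <- theta - (alpha / m) * X' (X theta - y)
#include <armadillo>

arma::vec BatchGradientDescentSketch(const arma::mat& X, const arma::vec& y,
                                     arma::vec theta, double alpha, int numIters)
{
  const double m = static_cast<double>(X.n_rows);
  for (int iter = 0; iter < numIters; ++iter)
    theta -= (alpha / m) * X.t() * (X * theta - y);
  return theta;
}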
double find_P(const arma::mat& X, const arma::mat& Y, double sigma2,
              float outliers, arma::vec& P1, arma::vec& Pt1, arma::mat& PX,
              bool use_fgt, const float epsilon)
{
  P1.zeros();
  Pt1.zeros();
  PX.zeros();

  const arma::uword N = X.n_rows;
  const arma::uword M = Y.n_rows;
  const arma::uword D = Y.n_cols;

  const double h = std::sqrt(2 * sigma2);
  const double ndi = (outliers * M * std::pow(2 * M_PI * sigma2, 0.5 * D)) /
      ((1 - outliers) * N);

  arma::vec q = arma::ones<arma::vec>(M);

  fgt::GaussTransformUnqPtr transformY;
  if (use_fgt) {
    transformY = fgt::choose_gauss_transform(Y, h, epsilon);
  } else {
    transformY.reset(new fgt::Direct(Y, h));
  }
  arma::vec denomP = transformY->compute(X, q);

  denomP = denomP + ndi;
  Pt1 = 1 - ndi / denomP;
  q = 1 / denomP;

  fgt::GaussTransformUnqPtr transformX;
  if (use_fgt) {
    transformX = fgt::choose_gauss_transform(X, h, epsilon);
  } else {
    transformX.reset(new fgt::Direct(X, h));
  }
  P1 = transformX->compute(Y, q);

  for (arma::uword i = 0; i < D; ++i) {
    q = X.col(i) / denomP;
    PX.col(i) = transformX->compute(Y, q);
  }

  return -arma::sum(arma::log(denomP)) + D * N * std::log(sigma2) / 2;
}
void MaxVarianceNewCluster::Precalculate(const MatType& data,
                                         const arma::mat& oldCentroids,
                                         arma::Col<size_t>& clusterCounts,
                                         MetricType& metric)
{
  // We have to calculate the variances of each cluster and the assignments of
  // each point.  This is most easily done by iterating through the entire
  // dataset.
  variances.zeros(oldCentroids.n_cols);
  assignments.set_size(data.n_cols);

  // Add the variance of each point's distance away from the cluster.  I think
  // this is the sensible thing to do.
  for (size_t i = 0; i < data.n_cols; ++i)
  {
    // Find the closest centroid to this point.
    double minDistance = std::numeric_limits<double>::infinity();
    size_t closestCluster = oldCentroids.n_cols; // Invalid value.

    for (size_t j = 0; j < oldCentroids.n_cols; j++)
    {
      const double distance = metric.Evaluate(data.col(i), oldCentroids.col(j));
      if (distance < minDistance)
      {
        minDistance = distance;
        closestCluster = j;
      }
    }

    assignments[i] = closestCluster;
    variances[closestCluster] += std::pow(metric.Evaluate(data.col(i),
        oldCentroids.col(closestCluster)), 2.0);
  }

  // Divide by the number of points in the cluster to produce the variance,
  // unless the cluster is empty or contains only one point, in which case we
  // set the variance to 0.
  for (size_t i = 0; i < clusterCounts.n_elem; ++i)
    if (clusterCounts[i] <= 1)
      variances[i] = 0;
    else
      variances[i] /= clusterCounts[i];
}
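// A compact sketch of the same precalculation with the Euclidean metric fixed
// (the class above is templated on MetricType); the function and parameter
// names here are illustrative.  Each point is assigned to its nearest centroid,
// its squared distance is accumulated into that cluster's variance, and the
// totals are then normalized by the cluster sizes.
#include <armadillo>

void PrecalculateVariancesSketch(const arma::mat& data,
                                 const arma::mat& centroids,
                                 const arma::Col<size_t>& clusterCounts,
                                 arma::Col<size_t>& assignments,
                                 arma::vec& variances)
{
  variances.zeros(centroids.n_cols);
  assignments.set_size(data.n_cols);

  for (arma::uword i = 0; i < data.n_cols; ++i)
  {
    arma::vec dists(centroids.n_cols);
    for (arma::uword j = 0; j < centroids.n_cols; ++j)
      dists(j) = arma::norm(data.col(i) - centroids.col(j), 2);

    assignments[i] = dists.index_min();
    variances[assignments[i]] += dists.min() * dists.min();
  }

  // Empty or singleton clusters get zero variance.
  for (arma::uword j = 0; j < clusterCounts.n_elem; ++j)
    variances[j] = (clusterCounts[j] <= 1) ? 0.0 : variances[j] / clusterCounts[j];
}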
// [[Rcpp::export()]]
void getEystar_hierIRT(arma::mat& Eystar,
                       const arma::mat& y,
                       const arma::mat& z,
                       const arma::mat& g,
                       const arma::mat& i,
                       const arma::mat& j,
                       const arma::mat& Ea,
                       const arma::mat& Eb,
                       const arma::mat& Egamma,
                       const arma::mat& Eta,
                       const int ND,
                       const int NG,
                       const int NI,
                       const int NJ,
                       const int NL)
{
  double ml;
  signed int l;

  // Main calculation.  'ml' is declared private so each thread keeps its own
  // copy and iterations do not race on it.
#pragma omp parallel for private(ml)
  for (l = 0; l < NL; l++) {
    ml = Ea(j(l, 0), 0) +
         Eb(j(l, 0), 0) * accu(Egamma.row(g(i(l, 0), 0)) % z.row(i(l, 0))) +
         Eta(i(l, 0), 0) * Eb(j(l, 0), 0);

    if (y(l, 0) == 1)  Eystar(l, 0) = etn1(ml, 1.0, 0.0, R_PosInf);
    if (y(l, 0) == -1) Eystar(l, 0) = etn1(ml, 1.0, R_NegInf, 0.0);
    if (y(l, 0) == 0)  Eystar(l, 0) = etn1(ml, 1.0, R_NegInf, R_PosInf);

    // Note: taking etn() of extreme truncated normals is a problem:
    // etn(-9.49378, 1, 0, 1000) gives Inf for Eystar, which crashes everything.
    // In these cases, ignore the vote and fall back to the mean.
    if (!(arma::is_finite(Eystar(l, 0))))
      Eystar(l, 0) = ml;
  } // end for (l = 0; l < NL; l++)

  return;
}
// [[Rcpp::export]]
double energyscoreC(arma::colvec y, arma::mat dat)
{
  double s1 = 0;
  double m = dat.n_cols;

  for (int i = 1; i < (m + 1); i++) {
    s1 += euclnormC(dat.col(i - 1) - y);
  }

  double s2 = 0;
  for (int i = 1; i < (m + 1); i++) {
    for (int j = i; j < (m + 1); j++) {
      s2 += 2 * euclnormC(dat.col(i - 1) - dat.col(j - 1));
    }
  }

  double out = (s1 / m) - s2 / (2 * pow(m, 2));
  return out;
}
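// A self-contained restatement of the energy score above, assuming euclnormC()
// is simply the Euclidean norm (that helper is not shown here):
//   ES = (1/m) sum_i ||x_i - y||  -  1/(2 m^2) sum_{i,j} ||x_i - x_j||.
// Columns of dat are the ensemble members x_i and y is the observation; the
// function name is an assumption made for this example.
#include <armadillo>

double EnergyScoreSketch(const arma::colvec& y, const arma::mat& dat)
{
  const arma::uword m = dat.n_cols;
  double s1 = 0.0, s2 = 0.0;

  for (arma::uword i = 0; i < m; ++i)
    s1 += arma::norm(dat.col(i) - y, 2);

  // Each unordered pair appears twice in the double sum, hence the factor 2.
  for (arma::uword i = 0; i < m; ++i)
    for (arma::uword j = i + 1; j < m; ++j)
      s2 += 2.0 * arma::norm(dat.col(i) - dat.col(j), 2);

  return s1 / m - s2 / (2.0 * m * m);
}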
void Gaussian::fit(const arma::mat& data)
{
  if (data.size() > 0) {
    D = data.n_rows;
    Mean = arma::mean(data);
    Covariance = arma::cov(data);
    A = arma::chol(Covariance);
    det = arma::det(Covariance);
  }
}
inline void HUpdate(const MatType& V,
                    const arma::mat& W,
                    arma::mat& H)
{
  // n, m, currentUserIndex, kh, and u are class members: the number of items,
  // the number of users, the column currently being updated, the
  // regularization parameter, and the step size.
  arma::mat deltaH(H.n_rows, 1);
  deltaH.zeros();

  for (size_t i = 0; i < n; i++)
  {
    double val;
    if ((val = V(i, currentUserIndex)) != 0)
      deltaH += (val - arma::dot(W.row(i), H.col(currentUserIndex))) *
          arma::trans(W.row(i));
  }

  if (kh != 0)
    deltaH -= kh * H.col(currentUserIndex);

  H.col(currentUserIndex++) += u * deltaH;
  currentUserIndex = currentUserIndex % m;
}
template<>
inline void SVDIncompleteIncrementalLearning::
WUpdate<arma::sp_mat>(const arma::sp_mat& V,
                      arma::mat& W,
                      const arma::mat& H)
{
  arma::mat deltaW(n, W.n_cols);
  deltaW.zeros();

  // Iterate only over the nonzero entries of the current user's column.
  for (arma::sp_mat::const_iterator it = V.begin_col(currentUserIndex);
       it != V.end_col(currentUserIndex); ++it)
  {
    double val = *it;
    size_t i = it.row();
    deltaW.row(i) += (val - arma::dot(W.row(i), H.col(currentUserIndex))) *
        arma::trans(H.col(currentUserIndex));
    if (kw != 0)
      deltaW.row(i) -= kw * W.row(i);
  }

  W += u * deltaW;
}
/**
 * Apply dimensionality reduction using Principal Component Analysis to the
 * provided data set.
 *
 * @param data M x N data matrix; on output it holds the projection of each
 *     column vector onto the leading newDimension basis vectors (columns of
 *     the coefficient/eigenvector matrix).
 * @param newDimension The dimension to reduce the data to.
 */
void PCA::Apply(arma::mat& data, const size_t newDimension) const
{
  arma::mat coeffs;
  arma::vec eigVal;

  Apply(data, data, eigVal, coeffs);

  if (newDimension < coeffs.n_rows && newDimension > 0)
    data.shed_rows(newDimension, data.n_rows - 1);
}
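// A standalone sketch of the same reduction built on arma::princomp.  Note
// that princomp treats rows as observations, whereas the data above stores one
// point per column, hence the transposes; the function name is illustrative.
#include <armadillo>

void PcaReduceSketch(arma::mat& data, const arma::uword newDimension)
{
  arma::mat coeff, score;
  arma::vec latent;
  arma::princomp(coeff, score, latent, data.t());   // rows = observations.

  // Keep only the leading newDimension principal components.
  if (newDimension > 0 && newDimension <= score.n_cols)
    data = score.cols(0, newDimension - 1).t();
}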