arma::Col<double> compute_column_means(const arma::Mat<double>& data) { const arma::uword n_cols = data.n_cols; arma::Col<double> means(n_cols); for (arma::uword i = 0; i < n_cols; ++i) means(i) = arma::mean(data.col(i)); return means; }
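// Usage sketch for the helper above (values are made up). Armadillo can also
// compute column means directly: arma::mean(M, 0) averages each column.
#include <armadillo>

int main() {
    arma::Mat<double> data = {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}};
    arma::Col<double> m1 = compute_column_means(data);
    arma::Col<double> m2 = arma::mean(data, 0).t();   // dim = 0 averages each column
    m1.print("loop:");
    m2.print("built-in:");
    return 0;
}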
const blitz::Array<double,2> bob::learn::em::GMMMachine::getMeans() const { blitz::Array<double,2> means(m_n_gaussians,m_n_inputs); for(size_t i=0; i<m_n_gaussians; ++i) means(i,blitz::Range::all()) = m_gaussians[i]->getMean(); return means; }
std::pair< double, double > gaussian_process::evaluate( const Eigen::MatrixXd& domain ) const { if( domain.rows() != 1 ) { COMMA_THROW( comma::exception, "expected 1 row in domain, got " << domain.rows() << " rows" ); } Eigen::VectorXd means( 1 ); Eigen::VectorXd variances( 1 ); evaluate( domain, means, variances ); return std::make_pair( means( 0 ), variances( 0 ) ); }
Matrix initutil::gonzalez(commonutil::DataSet const& input, idx_type k, std::mt19937& gen) { idx_type n = input.points.cols(); idx_type d = input.points.rows(); initutil::check(k, d, n); Matrix means(d,k); Vector sqNorms; for (idx_type i=0; i<k; ++i) if (i==0) means.col(i) = input.points.col(commonutil::randomIndex(input.weights, gen)); else { if (i==1) sqNorms = (input.points.colwise()-means.col(0)).colwise().squaredNorm(); else { for (idx_type j=0; j<n; ++j) { fp_type sqn = (input.points.col(j)-means.col(i-1)).squaredNorm(); if (sqn<sqNorms[j]) sqNorms[j]=sqn; } } idx_type index; sqNorms.maxCoeff(&index); means.col(i) = input.points.col(index); } return means; }
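// For reference, the same farthest-first (Gonzalez) seeding strategy on plain
// 1-D data -- a self-contained sketch, not part of initutil:
#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

std::vector<double> gonzalez_1d(const std::vector<double>& pts, std::size_t k) {
    std::vector<double> centers{pts.front()};    // first center: an arbitrary point
    std::vector<double> sq(pts.size(), std::numeric_limits<double>::infinity());
    while (centers.size() < k) {
        std::size_t far = 0;
        for (std::size_t j = 0; j < pts.size(); ++j) {
            double d = pts[j] - centers.back();
            sq[j] = std::min(sq[j], d * d);      // distance to the nearest chosen center
            if (sq[j] > sq[far]) far = j;
        }
        centers.push_back(pts[far]);             // next center: the farthest point
    }
    return centers;
}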
bool VLFeat::Initialize_kmeans() { bin_data dat(kmeansname); kmeans = vl_kmeans_new(VL_TYPE_FLOAT, VlDistanceL2); vl_kmeans_set_centers(kmeans, dat.vector_address(0), dat.vdim(), dat.nobjects()); if (MethodVerbose()) cout << "VLFeat::Initialize_kmeans() read in kmeans <" << kmeansname << "> dim=" << descriptor_dim() << " nclust=" << nclusters() << endl; if (kdtree_ntrees) { kdtree = vl_kdforest_new(VL_TYPE_FLOAT, descriptor_dim(), kdtree_ntrees, VlDistanceL2); vl_kdforest_build(kdtree, nclusters(), means()); vl_kdforest_set_max_num_comparisons(kdtree, kdtree_maxcomps); } do_vlad = true; return true; }
void calculate_projections(const std::string file_in,
                           const std::string file_out,
                           Matrix<double> eigenvecs,
                           std::size_t mem_buf_size,
                           bool use_correlation,
                           Matrix<double> stats) {
    // calculating the projection, we need twice the space
    // (original data + result)
    mem_buf_size /= 4;
    std::size_t n_variables = stats.n_rows();
    std::vector<double> means(n_variables);
    std::vector<double> inverse_sigmas(n_variables);
    for (std::size_t i = 0; i < n_variables; ++i) {
        means[i] = stats(i, 0);
        inverse_sigmas[i] = 1.0 / stats(i, 1);
    }
    bool append_to_file = false;
    DataFileReader<double> fh_file_in(file_in, mem_buf_size);
    DataFileWriter<double> fh_file_out(file_out);
    read_blockwise(fh_file_in, [&](Matrix<double>& m) {
        FastPCA::shift_matrix_columns_inplace(m, means);
        if (use_correlation) {
            FastPCA::scale_matrix_columns_inplace(m, inverse_sigmas);
        }
        fh_file_out.write(std::move(m * eigenvecs), append_to_file);
        append_to_file = true;
    });
}
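// The blockwise call above boils down to y = (x - mu) * V, optionally with a
// 1/sigma scaling for correlation-based PCA. A minimal stand-alone sketch with
// plain loops (row-major frames x variables; names are illustrative only):
#include <cstddef>
#include <vector>

using Mat = std::vector<std::vector<double> >;

Mat project(Mat m, const std::vector<double>& means,
            const std::vector<double>& inv_sigmas,
            const Mat& eigenvecs, bool use_correlation) {
    const std::size_t rows = m.size(), vars = means.size(), comps = eigenvecs[0].size();
    Mat proj(rows, std::vector<double>(comps, 0.0));
    for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t v = 0; v < vars; ++v) {
            m[r][v] -= means[v];                               // center
            if (use_correlation) m[r][v] *= inv_sigmas[v];     // scale to unit variance
        }
        for (std::size_t c = 0; c < comps; ++c)
            for (std::size_t v = 0; v < vars; ++v)
                proj[r][c] += m[r][v] * eigenvecs[v][c];       // (x - mu) * V
    }
    return proj;
}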
int main() { getcf(); for(nt=0; nt<=ntjob; nt++) { move(); if(nt%ntprint==0) { means(); } } }
vector< T > compute_channel_means( const storage< T, L >& sto ) { // compute means vector< T > means( sto.channel_size() ); for ( index_t row = 0; row < sto.frame_size(); ++row ) for ( index_t col = 0; col < sto.channel_size(); ++col ) means[ col ] += sto( row, col ); for ( auto& m : means ) m /= sto.frame_size(); return means; }
Vector<double> ScalingLayer::arrange_means(void) const { const size_t scaling_neurons_number = get_scaling_neurons_number(); Vector<double> means(scaling_neurons_number); for(size_t i = 0; i < scaling_neurons_number; i++) { means[i] = statistics[i].mean; } return(means); }
Matrix initutil::uniformMeans(commonutil::DataSet const& input, idx_type k, std::mt19937& gen) { idx_type n = input.points.cols(); idx_type d = input.points.rows(); initutil::check(k, d, n); Matrix means(d,k); for (idx_type i=0; i<k; ++i) means.col(i) = input.points.col(commonutil::randomIndex(input.weights, gen)); return means; }
//! Calculates spectral covariance of image
CImg<double> GeoImage::spectral_covariance() const {
    unsigned int NumBands(nbands());
    CImg<double> covariance(NumBands, NumBands, 1, 1, 0), bandchunk, matrixchunk;
    CImg<unsigned char> mask;
    int validsize;
    vector<Chunk>::const_iterator iCh;
    vector<Chunk> _chunks = chunks();
    for (iCh = _chunks.begin(); iCh != _chunks.end(); iCh++) {
        // Bands x NumPixels
        matrixchunk = CImg<double>(NumBands, iCh->area(), 1, 1, 0);
        mask = nodata_mask(*iCh);
        validsize = mask.size() - mask.sum();
        int p(0);
        for (unsigned int b = 0; b < NumBands; b++) {
            bandchunk = (*this)[b].read<double>(*iCh);
            p = 0;
            cimg_forXY(bandchunk, x, y) {
                if (mask(x, y) == 0) matrixchunk(b, p++) = bandchunk(x, y);
            }
        }
        if (p != (int)size()) matrixchunk.crop(0, 0, NumBands - 1, p - 1);
        covariance += (matrixchunk.get_transpose() * matrixchunk) / (validsize - 1);
    }
    // Subtract mean: E[X X^T] - mu mu^T
    CImg<double> means(NumBands);
    for (unsigned int b = 0; b < NumBands; b++) means(b) = (*this)[b].stats()[2];
    covariance -= (means.get_transpose() * means);
    if (Options::verbose() > 2) {
        std::cout << basename() << " Spectral Covariance Matrix:" << endl;
        cimg_forY(covariance, y) {
            std::cout << "\t";
            cimg_forX(covariance, x) {
                std::cout << std::setw(18) << covariance(x, y);
            }
            // (tail assumed: finish the verbose printout and return the accumulated matrix)
            std::cout << std::endl;
        }
    }
    return covariance;
}
void kmeansModule::initClustersUnlabeled(float** data, int M, vector<int>& pts) {
    int KK = 10;  // number of clusters
    int N = pts.size();
    vector<vector<float> > means(KK, vector<float>(M, 0));
    srand(time(0));
    random_shuffle(pts.begin(), pts.end());
    for (int k = 0; k < KK; k++)
        for (int j = 0; j < M; j++)
            if (weights[j])
                means[k][j] = data[pts[k]][j];
}
std::vector<float> get_correlation_matrix(TNtuple* nt) { int nentries = nt->GetEntries(); // get list of branch names std::vector<std::string> names; for (int i=0; i<nt->GetListOfBranches()->GetEntries(); i++) { std::string name = nt->GetListOfBranches()->At(i)->GetName(); if (name == "likelihood") { continue; } names.push_back(name); } std::vector<float> matrix(names.size() * names.size()); // convert the ntuple to a vector, calculating means as we go std::vector<float> table(names.size() * nentries); std::vector<float> means(names.size(), 0); for (int i=0; i<nentries; i++) { for (size_t j=0; j<names.size(); j++) { float v = get_ntuple_entry(nt, i, names.at(j)); table.at(j + i * names.size()) = v; means.at(j) += v; } } // sums to means for (size_t i=0; i<names.size(); i++) { means.at(i) /= nentries; } // compute correlations for (size_t i=0; i<names.size(); i++) { for (size_t j=i; j<names.size(); j++) { float t = 0; float dx2 = 0; float dy2 = 0; for (int k=0; k<nentries; k++) { float x1 = table.at(i + k * names.size()) - means.at(i); float x2 = table.at(j + k * names.size()) - means.at(j); t += x1 * x2; dx2 += x1 * x1; dy2 += x2 * x2; } matrix.at(i * names.size() + j) = t / TMath::Sqrt(dx2 * dy2); } } return matrix; }
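// Each entry of the matrix above is a Pearson correlation coefficient,
// r = sum((x-mx)(y-my)) / sqrt(sum((x-mx)^2) * sum((y-my)^2)). A minimal
// stand-alone version for two vectors (illustrative only):
#include <cmath>
#include <cstddef>
#include <vector>

float pearson(const std::vector<float>& x, const std::vector<float>& y) {
    const std::size_t n = x.size();
    float mx = 0, my = 0;
    for (std::size_t k = 0; k < n; ++k) { mx += x[k]; my += y[k]; }
    mx /= n; my /= n;
    float sxy = 0, sxx = 0, syy = 0;
    for (std::size_t k = 0; k < n; ++k) {
        const float dx = x[k] - mx, dy = y[k] - my;
        sxy += dx * dy; sxx += dx * dx; syy += dy * dy;
    }
    return sxy / std::sqrt(sxx * syy);
}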
/**
 * Check whether any column in the data completely separates the response variable.
 * If so, return the index.
 */
int LogisticRegression::dataIsSeparable(const vector<vector<double> > &data, const vector<double> &response) {
    vector<double> covariance(data.size(), 0);
    vector<double> means(data.size(), 0);
    vector<double> variances(data.size(), 0);
    double varResponse = vecops::vecVariance(response);
    double meanResponse = vecops::vecCumSum(response);
    meanResponse /= response.size();
    for (unsigned int i = 0; i < data.size(); i++) {
        means[i] = vecops::vecCumSum(data[i]);
        variances[i] = vecops::vecVariance(data[i]);
    }
    vecops::vecDiv<double>(means, data[0].size());
    // Now compute E[XY]
    for (unsigned int i = 0; i < data.size(); i++) {
        for (unsigned int j = 0; j < data[i].size(); j++) {
            covariance[i] += data[i][j] * response[j];
        }
    }
    vecops::vecDiv<double>(covariance, data[0].size());
    for (unsigned int i = 0; i < data.size(); i++) {
        covariance[i] -= (means[i] * meanResponse);
        covariance[i] = std::fabs(covariance[i]);  // use the double overload, not integer abs()
    }
    for (unsigned int i = 0; i < data.size(); i++) {
        covariance[i] /= (sqrt(varResponse * variances[i]));
    }
    double mx = -1.0;
    int mxLocation = -1;
    for (unsigned int i = 0; i < covariance.size(); i++) {
        if (covariance[i] > mx) {
            mx = covariance[i];
            mxLocation = i;
        }
    }
    if (mx > LogisticRegression::SEPARABLE_THRESHOLD) return mxLocation;
    return -1;
}
vector<int> clusterBase::simpleMeans(vector<vector<float> > data, int K) { int N = data.size(); int M = data[0].size(); vector<int> means (K, - 1); vector<double> bestDist (N, INF); vector<int> labels(N, 0); means[0] = 0; for (int k = 1; k < K; k++) { int bestCentroidCand = -1; double curBestDistOverall = 0; for (int i = 0; i < N; i++) { double dist = calculateDistance( data[i], data[means[k-1]]); if (dist < bestDist[i] + EPS) bestDist[i] = dist; if (bestDist[i] > curBestDistOverall) { curBestDistOverall = bestDist[i]; bestCentroidCand = i; } } if (bestCentroidCand >= 0) means[k] = bestCentroidCand; } for (int i = 0 ; i < N; i++) { double bestDist = INF; for (int k = 0 ; k < K; k++) { double dist = calculateDistance(data[i], data[means[k]]); if (dist < bestDist) { bestDist = dist; labels[i] = k; } } } return labels; }
void calculate_projections(const std::string file_in,
                           const std::string file_out,
                           Matrix<double> eigenvecs,
                           std::size_t mem_buf_size,
                           bool use_correlation,
                           Matrix<double> stats) {
    mem_buf_size /= 4;
    std::size_t n_variables = stats.n_rows();
    std::vector<double> means(n_variables);
    std::vector<double> inverse_sigmas(n_variables);
    std::vector<double> dih_shifts(n_variables);
    std::vector<double> scaled_periodicities(n_variables);
    for (std::size_t i = 0; i < n_variables; ++i) {
        means[i] = stats(i, 0);
        inverse_sigmas[i] = 1.0 / stats(i, 1);
        if (use_correlation) {
            dih_shifts[i] = (stats(i, 2) - means[i]) * inverse_sigmas[i];
            scaled_periodicities[i] = 2 * M_PI * inverse_sigmas[i];
        } else {
            dih_shifts[i] = stats(i, 2);
            scaled_periodicities[i] = 2 * M_PI;
        }
    }
    // projections
    bool append_to_file = false;
    DataFileReader<double> fh_file_in(file_in, mem_buf_size);
    DataFileWriter<double> fh_file_out(file_out);
    read_blockwise(fh_file_in, [&](Matrix<double>& m) {
        // convert degrees to radians
        FastPCA::deg2rad_inplace(m);
        if (use_correlation) {
            // shift by periodic means (necessary for scaling)
            FastPCA::Periodic::shift_matrix_columns_inplace(m, means);
            // scale data by sigmas for correlated projections
            FastPCA::scale_matrix_columns_inplace(m, inverse_sigmas);
        }
        // shift dihedrals to minimize boundary jumps
        // and correct for periodic boundary condition
        FastPCA::Periodic::shift_matrix_columns_inplace(m, dih_shifts, scaled_periodicities);
        // output
        fh_file_out.write(m * eigenvecs, append_to_file);
        append_to_file = true;
    });
}
vector<double> VectorDataSet::mean(const std::vector<int>& patterns) { int pIndex; std::vector<double> means(numFeatures, 0);//vector of means of each feature for (unsigned int i = 0; i < patterns.size(); i++)//for each pattern listed { pIndex = patterns[i]; for (long j = 0; j < X[pIndex].size(); ++j)//run through the pattern { means[j] += X[pIndex][j]; } } for (int j = 0; j < numFeatures; ++j) {//divide the totals means[j] /= float(patterns.size()); } return means; }
int main(int argc, char** argv) { if (argc != 2) { printUsage(argv[0]); return 1; } boost::mt19937 rng; boost::normal_distribution<> nd(0.0, 1.0); boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > nor(rng, nd); const size_t dimSize = 2; const size_t numElements = 300000; const size_t numClusters = 3; std::vector<Point> means(numClusters); means[0] = Point(dimSize, 1.0); means[1] = Point(dimSize); means[2] = Point(dimSize); means[2][0] = -2; means[2][1] = 2; std::vector<double> deviations; deviations.push_back(2.0); deviations.push_back(3.0); deviations.push_back(4.0); std::cout << numElements << " " << numClusters << " " << dimSize << std::endl; for (size_t i = 0; i < numElements; ++i) { int r = rand() % 3; Point point(dimSize); for (size_t j = 0; j < dimSize; ++j) { point[j] = means[r][j] + nor() * deviations[r]; } print(point); } return 0; }
bool ModelParametersGMR::saveGridData(const VectorXd& min, const VectorXd& max, const VectorXi& n_samples_per_dim, string save_directory, bool overwrite) const { if (save_directory.empty()) return true; //MatrixXd inputs; //FunctionApproximator::generateInputsGrid(min, max, n_samples_per_dim, inputs); //saveMatrix(save_directory,"n_samples_per_dim.txt",n_samples_per_dim,overwrite); int n_gaussians = means_x_.size(); int n_dims_in = means_x_[0].size(); int n_dims_out = means_y_[0].size(); int n_dims_gmm = n_dims_in + n_dims_out; std::vector<VectorXd> means(n_gaussians); std::vector<MatrixXd> covars(n_gaussians); for (int i_gau = 0; i_gau < n_gaussians; i_gau++) { means[i_gau] = VectorXd(n_dims_gmm); means[i_gau].segment(0, n_dims_in) = means_x_[i_gau]; means[i_gau].segment(n_dims_in, n_dims_out) = means_y_[i_gau]; covars[i_gau] = MatrixXd(n_dims_gmm,n_dims_gmm); covars[i_gau].fill(0); covars[i_gau].block(0, 0, n_dims_in, n_dims_in) = covars_x_[i_gau]; covars[i_gau].block(n_dims_in, n_dims_in, n_dims_out, n_dims_out) = covars_y_[i_gau]; covars[i_gau].block(n_dims_in, 0, n_dims_out, n_dims_in) = covars_y_x_[i_gau]; covars[i_gau].block(0, n_dims_in, n_dims_in, n_dims_out) = covars_y_x_[i_gau].transpose(); } saveGMM(save_directory,means,covars); return true; }
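// The loop above stacks the per-Gaussian input/output parts into one joint
// [x; y] Gaussian. A small Eigen sketch of that block layout for a 1-D input
// and 2-D output (numbers are made up, not taken from any trained model):
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::VectorXd mean_x(1), mean_y(2);
    mean_x << 0.5;
    mean_y << 1.0, -2.0;
    Eigen::MatrixXd cov_x(1, 1), cov_y(2, 2), cov_yx(2, 1);
    cov_x << 0.1;
    cov_y << 0.2, 0.0,
             0.0, 0.3;
    cov_yx << 0.05,
              0.01;

    Eigen::VectorXd mean(3);
    mean.segment(0, 1) = mean_x;                       // input block
    mean.segment(1, 2) = mean_y;                       // output block
    Eigen::MatrixXd cov = Eigen::MatrixXd::Zero(3, 3);
    cov.block(0, 0, 1, 1) = cov_x;
    cov.block(1, 1, 2, 2) = cov_y;
    cov.block(1, 0, 2, 1) = cov_yx;                    // cross-covariance
    cov.block(0, 1, 1, 2) = cov_yx.transpose();
    std::cout << mean.transpose() << "\n\n" << cov << std::endl;
    return 0;
}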
/************************************************************************* Linear regression Variant of LRBuild which uses vector of standatd deviations (errors in function values). INPUT PARAMETERS: XY - training set, array [0..NPoints-1,0..NVars]: * NVars columns - independent variables * last column - dependent variable S - standard deviations (errors in function values) array[0..NPoints-1], S[i]>0. NPoints - training set size, NPoints>NVars+1 NVars - number of independent variables OUTPUT PARAMETERS: Info - return code: * -255, in case of unknown internal error * -4, if internal SVD subroutine haven't converged * -1, if incorrect parameters was passed (NPoints<NVars+2, NVars<1). * -2, if S[I]<=0 * 1, if subroutine successfully finished LM - linear model in the ALGLIB format. Use subroutines of this unit to work with the model. AR - additional results -- ALGLIB -- Copyright 02.08.2008 by Bochkanov Sergey *************************************************************************/ void lrbuilds(const ap::real_2d_array& xy, const ap::real_1d_array& s, int npoints, int nvars, int& info, linearmodel& lm, lrreport& ar) { ap::real_2d_array xyi; ap::real_1d_array x; ap::real_1d_array means; ap::real_1d_array sigmas; int i; int j; double v; int offs; double mean; double variance; double skewness; double kurtosis; // // Test parameters // if( npoints<=nvars+1||nvars<1 ) { info = -1; return; } // // Copy data, add one more column (constant term) // xyi.setbounds(0, npoints-1, 0, nvars+1); for(i = 0; i <= npoints-1; i++) { ap::vmove(&xyi(i, 0), &xy(i, 0), ap::vlen(0,nvars-1)); xyi(i,nvars) = 1; xyi(i,nvars+1) = xy(i,nvars); } // // Standartization // x.setbounds(0, npoints-1); means.setbounds(0, nvars-1); sigmas.setbounds(0, nvars-1); for(j = 0; j <= nvars-1; j++) { ap::vmove(x.getvector(0, npoints-1), xy.getcolumn(j, 0, npoints-1)); calculatemoments(x, npoints, mean, variance, skewness, kurtosis); means(j) = mean; sigmas(j) = sqrt(variance); if( ap::fp_eq(sigmas(j),0) ) { sigmas(j) = 1; } for(i = 0; i <= npoints-1; i++) { xyi(i,j) = (xyi(i,j)-means(j))/sigmas(j); } } // // Internal processing // lrinternal(xyi, s, npoints, nvars+1, info, lm, ar); if( info<0 ) { return; } // // Un-standartization // offs = ap::round(lm.w(3)); for(j = 0; j <= nvars-1; j++) { // // Constant term is updated (and its covariance too, // since it gets some variance from J-th component) // lm.w(offs+nvars) = lm.w(offs+nvars)-lm.w(offs+j)*means(j)/sigmas(j); v = means(j)/sigmas(j); ap::vsub(&ar.c(nvars, 0), &ar.c(j, 0), ap::vlen(0,nvars), v); ap::vsub(ar.c.getcolumn(nvars, 0, nvars), ar.c.getcolumn(j, 0, nvars), v); // // J-th term is updated // lm.w(offs+j) = lm.w(offs+j)/sigmas(j); v = 1/sigmas(j); ap::vmul(&ar.c(j, 0), ap::vlen(0,nvars), v); ap::vmul(ar.c.getcolumn(j, 0, nvars), v); } }
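// The "un-standartization" block above substitutes z_j = (x_j - m_j) / s_j back
// into the fitted model y = sum_j w_j z_j + w0, which gives coefficients in the
// original units: w_j' = w_j / s_j and w0' = w0 - sum_j w_j m_j / s_j. A small
// sketch of that conversion (plain std::vector, not the ALGLIB types):
#include <cstddef>
#include <vector>

void unstandardize(std::vector<double>& w, double& w0,
                   const std::vector<double>& means,
                   const std::vector<double>& sigmas) {
    for (std::size_t j = 0; j < w.size(); ++j) {
        w0 -= w[j] * means[j] / sigmas[j];   // fold the centering into the constant term
        w[j] /= sigmas[j];                   // rescale the j-th coefficient
    }
}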
int main (int argc, char **argv) { char *xmalloc (); char *xrealloc (); char *xstrdup (); int infpop; double nb; /* Check parameters */ Cmdline *cmd = parseCmdline (argc, argv); if ((cmd->show_helpP) | (argc == 1)) usage (); if (cmd->show_versionP) { printf ("%s %s\n", argv[0], VERSION); exit (0); } check_param (cmd); infpop = (cmd->pop == 0) ? 1 : 0; cmd->precision /= PC; cmd->prevalence /= PC; cmd->level /= PC; cmd->alpha /= PC; cmd->power /= PC; cmd->exposed /= PC; if (cmd->observedP) { nb = small_sampsi (cmd); display_small (cmd, nb); } else if (cmd->odds_ratioP && !cmd->sampleP) { cmd->ratio = floor (cmd->ratio); if (cmd->ratio < 1) sperror ("option -c should be >= 1"); case_control (cmd); } /* Absolute precision then sample size equals population size */ else if (cmd->precision == 0 && cmd->pop > 0) { nb = cmd->pop; display_surv (cmd, nb, infpop); } else if (cmd->precisionP) { nb = sampsi (cmd); display_surv (cmd, nb, infpop); } else if (cmd->binomialP) binom_ci (cmd); else if (cmd->compP && !cmd->sampleP && !cmd->deltaP) comp (cmd); else if (cmd->meansP && !cmd->sampleP && !cmd->deltaP) means (cmd); else if (cmd->sampleP && cmd->exposedP && !cmd->odds_ratioP && cmd->powerP && !cmd->matchedP) ccmin (cmd); else if (cmd->sampleP && cmd->compP && !cmd->deltaP) ppower (cmd); else if (cmd->sampleP && cmd->meansP && !cmd->deltaP) mpower (cmd); else if (cmd->sampleP && cmd->odds_ratioP && cmd->exposedP && !cmd->matchedP) ccpower (cmd); else if (cmd->sampleP && cmd->matchedP && cmd->odds_ratioP && cmd->exposedP) mccpower (cmd); else if (cmd->deltaP && cmd->compP && !cmd->sampleP) nequivp (cmd); else if (cmd->deltaP && cmd->meansP && !cmd->sampleP) nequivm (cmd); else sperror ("wrong combination of options, or missing options"); exit (0); }
void coxph_reg::estimate(const coxph_data &cdatain, const int model, const std::vector<std::string> &modelNames, const int interaction, const int ngpreds, const bool iscox, const int nullmodel, const mlinfo &snpinfo, const int cursnp) { coxph_data cdata = cdatain.get_unmasked_data(); mematrix<double> X = t_apply_model(cdata.X, model, interaction, ngpreds, iscox, nullmodel); int length_beta = X.nrow; beta.reinit(length_beta, 1); sebeta.reinit(length_beta, 1); mematrix<double> newoffset = cdata.offset - (cdata.offset).column_mean(0); mematrix<double> means(X.nrow, 1); for (int i = 0; i < X.nrow; i++) { beta[i] = 0.; } mematrix<double> u(X.nrow, 1); mematrix<double> imat(X.nrow, X.nrow); double *work = new double[X.ncol * 2 + 2 * (X.nrow) * (X.nrow) + 3 * (X.nrow)]; double loglik_int[2]; int flag; // Use Efron method of handling ties (for Breslow: 0.0), like in // R's coxph() double sctest = 1.0; // Set the maximum number of iterations that coxfit2() will run to // the default value from the class definition. int maxiterinput = MAXITER; // Make separate variables epsinput and tolcholinput that are not // const to send to coxfit2(), this way we won't have to alter // that function (which is a good thing: we want to keep it as // pristine as possible because it is copied from the R survival // package). double epsinput = EPS; double tolcholinput = CHOLTOL; coxfit2(&maxiterinput, &cdata.nids, &X.nrow, cdata.stime.data.data(), cdata.sstat.data.data(), X.data.data(), newoffset.data.data(), cdata.weights.data.data(), cdata.strata.data.data(), means.data.data(), beta.data.data(), u.data.data(), imat.data.data(), loglik_int, &flag, work, &epsinput, &tolcholinput, &sctest); // After coxfit2() maxiterinput contains the actual number of // iterations that were used. Store it in niter. niter = maxiterinput; // Check the results of the Cox fit; mirrored from the same checks // in coxph.fit.S and coxph.R from the R survival package. // A vector to indicate for which covariates the betas/se_betas // should be set to NAN. std::vector<bool> setToNAN = std::vector<bool>(X.nrow, false); // Based on coxph.fit.S lines with 'which.sing' and coxph.R line // with if(any(is.NA(coefficients))). These lines set coefficients // to NA if flag < nvar (with nvar = ncol(x)) and MAXITER > // 0. coxph.R then checks for any NAs in the coefficients and // outputs the warning message if NAs were found. if (flag < X.nrow) { int which_sing = 0; MatrixXd imateigen = imat.data; VectorXd imatdiag = imateigen.diagonal(); for (int i = 0; i < imatdiag.size(); i++) { if (imatdiag[i] == 0) { which_sing = i; setToNAN[which_sing] = true; if (i != 0) { // Don't warn for i=0 to exclude the beta // coefficient for the (constant) mean from the // check. For Cox regression the constant terms // are ignored. However, we leave it in the // calculations because otherwise the null model // calculation will fail in case there are no // other covariates than the SNP. 
std::cerr << "Warning for " << snpinfo.name[cursnp] << ", model " << modelNames[model] << ": X matrix deemed to be singular (variable " << which_sing + 1 << ")" << std::endl; } } } } if (niter >= MAXITER) { cerr << "Warning for " << snpinfo.name[cursnp] << ", model " << modelNames[model] << ": nr of iterations > the maximum (" << MAXITER << "): " << niter << endl; } if (flag == 1000) { cerr << "Warning for " << snpinfo.name[cursnp] << ", model " << modelNames[model] << ": Cox regression ran out of iterations and did not converge," << " setting beta and se to 'NaN'\n"; std::fill(setToNAN.begin(), setToNAN.end(), true); } else { VectorXd ueigen = u.data; MatrixXd imateigen = imat.data; VectorXd infs = ueigen.transpose() * imateigen; infs = infs.cwiseAbs(); VectorXd betaeigen = beta.data; assert(betaeigen.size() == infs.size()); // We check the beta's for all coefficients // (incl. covariates), maybe stick to only checking the SNP // coefficient? for (int i = 0; i < infs.size(); i++) { if (infs[i] > EPS && infs[i] > sqrt(EPS) * abs(betaeigen[i])) { setToNAN[i] = true; cerr << "Warning for " << snpinfo.name[cursnp] << ", model " << modelNames[model] << ": beta for covariate " << i + 1 << " may be infinite," << " setting beta and se to 'NaN'\n"; } } } for (int i = 0; i < X.nrow; i++) { if (setToNAN[i]) { // Cox regression failed somewhere, set results to NAN for // this X row (covariate or SNP) sebeta[i] = NAN; beta[i] = NAN; loglik = NAN; } else { sebeta[i] = sqrt(imat.get(i, i)); loglik = loglik_int[1]; } } delete[] work; }
void PrincipalComponentsAnalysis::compute(DataFrame& df) { if (df.getNumFactors() > 2) { // see PrincipalComponentsAnalysisTest cout << "You realize this hasn't been tested, right?" << endl; } Matrix dataMat(df.getNumFactors(), df.getNumDataVectors()); Matrix deviates(df.getNumFactors(), df.getNumDataVectors()); SymmetricMatrix covar(df.getNumFactors()); DiagonalMatrix eigenValues(df.getNumFactors()); Matrix eigenVectors; ColumnVector means(df.getNumFactors()); means = 0.0; RowVector h(df.getNumDataVectors()); h = 1.0; for (unsigned int j = 0; j < df.getNumFactors(); j++) { if (df.isNominal(j)) { throw Tgs::Exception("Only numeric values are supported."); } } for(unsigned int i = 0; i < df.getNumDataVectors(); i++) { for (unsigned int j = 0; j < df.getNumFactors(); j++) { double v = df.getDataElement(i, j); if (df.isNull(v)) { throw Tgs::Exception("Only non-null values are supported."); } dataMat.element(j, i) = v; means.element(j) += v / (double)df.getNumDataVectors(); } } try { deviates = dataMat - (means * h); covar << (1.0/(float)df.getNumDataVectors()) * (deviates * deviates.t()); Jacobi::jacobi(covar, eigenValues, eigenVectors); } catch (const std::exception&) { throw; } catch (...) { throw Tgs::Exception("Unknown error while calculating PCA"); } _sortEigens(eigenVectors, eigenValues); _components.resize(df.getNumFactors()); for (unsigned int v = 0; v < df.getNumFactors(); v++) { _components[v].resize(df.getNumFactors()); for (unsigned int d = 0; d < df.getNumFactors(); d++) { _components[v][d] = eigenVectors.element(d, v); } } }
// deal with unmatched region
void dealUnMatchedRegion(const Mat& srcImg, const vector<vector<Point2f>>& srcPointsTab, const vector<vector<Point2f>>& srcFeaturesTab, const vector<int>& srcLabels, vector<int>& isMatched, vector<Mat>& transforms) {
    int regionum = transforms.size();
    int width = srcImg.size().width;
    int height = srcImg.size().height;
    vector<Scalar> means(regionum);
    computeMeans(srcImg, srcPointsTab, means);
    vector<Point2f> centers(regionum);
    computeCenters(srcPointsTab, centers);
    vector<vector<int>> colorNeighbors(regionum);
    buildColorNeighbors(means, colorNeighbors);
    // handle the unmatched regions
    for (int i = 0; i < regionum; ++i) {
        if (isMatched[i] != 0) continue;
        // gather feature points from regions with similar colors
        int reIdx = i;
        vector<Point2f> nearFeatures(0);
        for (int j = 0; j < colorNeighbors[i].size(); ++j) {
            int nearIdx = colorNeighbors[i][j];
            const vector<Point2f>& nearReFts = srcFeaturesTab[nearIdx];
            copy(nearReFts.begin(), nearReFts.end(), std::back_inserter(nearFeatures));
        }
        // among the similarly colored feature points, find the nearest one
        Point2f nearest;
        bool isFind = findNearestPoint(nearFeatures, centers[reIdx], nearest);
        if (isFind == true) {
            int nearIdx = srcLabels[(int)nearest.y * width + (int)nearest.x];
            transforms[reIdx] = transforms[nearIdx].clone();
            isMatched[reIdx] = isMatched[nearIdx];
            // debug: visualize the nearest region
            cout << i << "-th unmatched region: the nearest one is " << nearIdx << endl;
            Mat test = Mat::zeros(height, width, CV_8UC3);
            fillRegion(srcPointsTab[i], means[i], test);
            circle(test, centers[i], 5, cv::Scalar(255, 255, 255));
            for (int j = 0; j < colorNeighbors[i].size(); ++j) {
                int nearIdx = colorNeighbors[i][j];
                fillRegion(srcPointsTab[nearIdx], means[nearIdx], test);
            }
            circle(test, centers[nearIdx], 5, cv::Scalar(255, 255, 255));
            string savefn = "output/near_regions_" + type2string(i) + ".png";
            imwrite(savefn, test);
            // end of visualization
        } else {
            cout << i << "-th unmatched region: no similar colored region." << endl;
        }
    }
}
int main(int argc, char **argv) { #ifdef QUESO_HAVE_LIBMESH unsigned int i; unsigned int j; const unsigned int num_pairs = 5; const unsigned int num_samples = 1e4; const double alpha = 3.0; const double beta = 1.0; QUESO::EnvOptionsValues opts; opts.m_seed = -1; MPI_Init(&argc, &argv); QUESO::FullEnvironment env(MPI_COMM_WORLD, "", "", &opts); #ifdef LIBMESH_DEFAULT_SINGLE_PRECISION // SLEPc farts with libMesh::Real==float libmesh_example_assert(false, "--disable-singleprecision"); #endif // Need an artificial block here because libmesh needs to // call PetscFinalize before we call MPI_Finalize #ifdef LIBMESH_HAVE_SLEPC { libMesh::LibMeshInit init(argc, argv); libMesh::Mesh mesh(init.comm()); libMesh::MeshTools::Generation::build_square(mesh, 20, 20, 0.0, 1.0, 0.0, 1.0, libMeshEnums::QUAD4); QUESO::FunctionOperatorBuilder fobuilder; fobuilder.order = "FIRST"; fobuilder.family = "LAGRANGE"; fobuilder.num_req_eigenpairs = num_pairs; QUESO::LibMeshFunction mean(fobuilder, mesh); QUESO::LibMeshNegativeLaplacianOperator precision(fobuilder, mesh); QUESO::InfiniteDimensionalGaussian mu(env, mean, precision, alpha, beta); // Vector to hold all KL coeffs std::vector<double> means(num_pairs, 0.0); std::vector<double> sumsqs(num_pairs, 0.0); std::vector<double> deltas(num_pairs, 0.0); double draw; for (i = 1; i < num_samples + 1; i++) { mu.draw(); for (j = 0; j < num_pairs; j++) { draw = mu.get_kl_coefficient(j); deltas[j] = draw - means[j]; means[j] += (double) deltas[j] / i; sumsqs[j] += deltas[j] * (draw - means[j]); } // std::cerr << "MEAN IS: " << means[0] << std::endl; } std::vector<double> vars(num_pairs, 0.0); for (j = 0; j < num_pairs; j++) { vars[j] = sumsqs[j] / (num_samples - 1); } double sigma = beta / std::pow(precision.get_eigenvalue(j), alpha / 2.0); double sigmasq = sigma * sigma; double mean_min; double mean_max; for (j = 0; j < num_pairs; j++) { // Mean is N(0, (lambda_j^{- alpha / 2} * beta)^2 / n) mean_min = -3.0 * sigma / std::sqrt(num_samples); mean_max = 3.0 * sigma / std::sqrt(num_samples); if (means[j] < mean_min || means[j] > mean_max) { std::cerr << "mean kl test failed" << std::endl; return 1; } } double var_min; double var_max; // var[j] should be approximately ~ N(sigma^2, 2 sigma^4 / (num_samples - 1)) for (j = 0; j < num_pairs; j++) { var_min = sigmasq - 3.0 * sigmasq * std::sqrt(2.0 / (num_samples - 1)); var_max = sigmasq + 3.0 * sigmasq * std::sqrt(2.0 / (num_samples - 1)); if (vars[j] < var_min || vars[j] > var_max) { std::cerr << "variance kl test failed" << std::endl; return 1; } } } #endif // LIBMESH_HAVE_SLEPC MPI_Finalize(); return 0; #else return 77; #endif }
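// The sampling loop above keeps a running mean and sum of squared deviations in
// a single pass (Welford's update). The same rule as a minimal stand-alone
// helper (illustrative, not part of QUESO):
#include <cstddef>

struct RunningStats {
    std::size_t n = 0;
    double mean = 0.0;
    double sumsq = 0.0;                 // sum of squared deviations from the current mean

    void push(double x) {
        ++n;
        const double delta = x - mean;
        mean += delta / n;              // update the mean first...
        sumsq += delta * (x - mean);    // ...then accumulate with the updated mean
    }
    double variance() const { return n > 1 ? sumsq / (n - 1) : 0.0; }
};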
int Look_txt() { TCHAR filter[] = TEXT("Ghemical MD results File (*.txt)\0*.txt\0") TEXT("All Files (*.*)\0*.*\0"); TCHAR fpath[1024]; TCHAR filename[1024]; sprintf(filename, "\0"); { DWORD nFilterIndex; vector<string> names; vector<string> *pnames = &names; vector<vector<double> > vectors; vectors.reserve(2000000); while (OpenFileDlg(0, filter, fpath, nFilterIndex) == S_OK) { ReadDatFile(NULL, fpath, filename, &vectors, pnames); pnames = NULL; printf("\nfilename %s\n\n", filename); int cols = names.size(); int rows = vectors.size(); #if WRITE_LOCKED_FORCES int cMom = 4 - 1; int cVx = 5 - 1; int cFxup = 14 - 1; int cFxdw = 17 - 1; int cVxup = 8 - 1; int cVxdw = 11 - 1; #endif #if WRITE_WORKED_FORCES int cMom = 4 - 1; int cVx = 5 - 1; int cVxup = 14 - 1; int cVxdw = 17 - 1; int cVx_wk_up = 8 - 1; int cVx_wk_dw = 11 - 1; int cFx_wk_up = 20 - 1; int cFx_wk_dw = 23 - 1; #endif vector<double> means(cols, 0.0); printf("vectors.size() = %d\n",rows); printf("names.size() = %d\n", cols); for (vector<vector<double> >::iterator it = vectors.begin(); it != vectors.end(); it++) { for (int c = 0; c < cols; c++) { means[c] += (*it).operator [](c); } } for (int c = 0; c < cols; c++) { means[c] /= rows; printf("mean(%s) = %f\n", names[c].c_str(), means[c]); } #if WRITE_LOCKED_FORCES || WRITE_WORKED_FORCES int r0 = 0; cout << "enter r0\n"; cin >> r0; #endif #if WRITE_LOCKED_FORCES vector<double> dF(rows-r0); for (int r = r0; r < rows; r++) { dF[r-r0] = vectors[r][cFxup] - vectors[r][cFxdw]; } Statistika (dF, "dF"); vector<double> Mom(rows-r0); for (r = r0; r < rows; r++) { Mom[r-r0] = vectors[r][cMom]; } Statistika (Mom, "Mom"); vector<double> dV(rows-r0); for (r = r0; r < rows; r++) { dV[r-r0] = vectors[r][cVxup] - vectors[r][cVxdw]; } Statistika (dV, "dV"); vector<double> Vx(rows-r0); for (r = r0; r < rows; r++) { Vx[r-r0] = vectors[r][cVx]; } Statistika (Vx, "Vx"); #endif #if WRITE_WORKED_FORCES vector<double> dF_wk(rows-r0); for (int r = r0; r < rows; r++) { dF_wk[r-r0] = vectors[r][cFx_wk_up] - vectors[r][cFx_wk_dw]; } Statistika (dF_wk, "dF_wk"); vector<double> dV_wk(rows-r0); for (r = r0; r < rows; r++) { dV_wk[r-r0] = vectors[r][cVx_wk_up] - vectors[r][cVx_wk_dw]; } Statistika (dV_wk, "dV_wk"); //if (!worked[n1]) vector<double> Mom(rows-r0); for (r = r0; r < rows; r++) { Mom[r-r0] = vectors[r][cMom]; } Statistika (Mom, "Mom"); vector<double> dV(rows-r0); for (r = r0; r < rows; r++) { dV[r-r0] = vectors[r][cVxup] - vectors[r][cVxdw]; } Statistika (dV, "dV"); vector<double> Vx(rows-r0); for (r = r0; r < rows; r++) { Vx[r-r0] = vectors[r][cVx]; } Statistika (Vx, "Vx"); #endif } } /*else { DWORD nFilterIndex; if (SaveFileDlg(0, filename, filter, nFilterIndex) == S_OK) { SetDlgItemText(ref->hDlg,IDC_EDIT_TRAJFILE2, filename); } }*/ printf("Hello World!\n"); return 0; }
// NOTE: only works with unweighted points Matrix initutil::agglomerativeMeans(commonutil::DataSet const& input, const idx_type k, const bool precompute) { GMMDesc desc; if (k==0) gmmlab_throw("initutil::agglomerativeToGMM() - Empty mixture requested."); idx_type n = input.points.cols(); idx_type d = input.points.rows(); if (n==0 || d==0) gmmlab_throw("initutil::agglomerativeToGMM() - Input is empty."); // initialize partition with trivial n-clustering std::vector<std::vector<idx_type>> partition; std::vector<Vector> partition_sums; for(idx_type i=0; i<n; ++i){ std::vector<idx_type> cluster; cluster.push_back(i); partition.push_back(cluster); partition_sums.push_back(input.points.col(i)); } // distance measure AverageLinkage averageLinkageDis; // precompute dissimilarities Matrix dis; if (precompute) { dis = Matrix::Zero(n,n); for (idx_type i=0; i<n; ++i) for (idx_type j=0; j<=i; ++j) dis(i,j) = averageLinkageDis(partition_sums.at(i), partition.at(i).size(), partition_sums.at(j), partition.at(j).size()); } for (idx_type r=n; r>k; --r) { idx_type first = -1; idx_type second = -1; fp_type min = FP_INFINITE; for (idx_type i=0; i<r; ++i) for (idx_type j=0; j<i; ++j) // j < i { //std::cout << "i=" << i << ", j=" << j << std::endl; if (i!=j) { fp_type d; if (precompute) d = dis(i,j); else d = averageLinkageDis(partition_sums.at(i), partition.at(i).size(), partition_sums.at(j), partition.at(j).size()); if ((i==1 && j==0) || d < min) { min = d; first = j; second = i; } } } // merge clusters (note: first < second) // 1. update sufficient statistics partition_sums.at(first) = partition_sums.at(first)+partition_sums.at(second); // 2. move points from second cluster to the first while(!partition.at(second).empty()) { idx_type point = partition.at(second).back(); partition.at(second).pop_back(); partition.at(first).push_back(point); } // overwrite second cluster with the last stored cluster and remove the last cluster (note: first < second) partition.at(second) = partition.back(); partition_sums.at(second) = partition_sums.back(); partition.pop_back(); partition_sums.pop_back(); if (precompute) { // cluster with index second has been overwritten by the last cluster (with index r-1) for(idx_type i=0; i<r-1; i++) { if(i!=second) { fp_type d = dis(r-1,i); if(i < second) dis(second, i) = d; else if(second < i) dis(i,second) = d; } } // compute distances wrt the newly formed cluster which is stored at index first for (idx_type i=0; i<r-1; i++) { if(i!=first){ fp_type d = averageLinkageDis(partition_sums.at(first), partition.at(first).size(), partition_sums.at(i), partition.at(i).size()); if(i < first) dis(first, i) = d; else if(first < i) dis(i,first) = d; } } } } Matrix means(d,k); for(idx_type i=0; i<k; ++i) means.col(i) = partition_sums.at(i) / partition.at(i).size(); return means; }
bool VLFeat::CalculateCommon(int f, bool all, int l) { string msg = "VLFeat::CalculateCommon("+ToStr(f)+","+ToStr(all)+","+ ToStr(l)+") : "; // if (!do_fisher && !do_vlad) { // cerr << msg // << "either encoding=fisher or encoding=vlad should be specified" // << endl; // return false; // } if (!gmm && !kmeans) { cerr << msg << "either gmm=xxx or kmeans=xxx option should be given" << endl; return false; } cox::tictac::func tt(tics, "VLFeat::CalculateCommon"); // obs! only some parameters here, should be in ProcessOptionsAndRemove() // too, also scales and geometry should be made specifiable... bool normalizeSift = false, renormalize = true, flat_window = true; size_t step = 3, binsize = 8; EnsureImage(); int width = Width(true), height = Height(true); if (FrameVerbose()) cout << msg+"wxh=" << width << "x" << height << "=" << width*height << endl; vector<float> rgbcoeff { 0.2989, 0.5870, 0.1140 }; imagedata idata = CurrentFrame(); idata.convert(imagedata::pixeldata_float); idata.force_one_channel(rgbcoeff); vector<float> dsift; size_t descr_size_orig = 0, descr_size_final = 0; vector<float> scales { 1.0000, 0.7071, 0.5000, 0.3536, 0.2500 }; // vector<float> scales { 1.0000 }; for (size_t i=0; i<scales.size(); i++) { if (KeyPointVerbose()) cout << "Starting vl_dsift_process() in scale " << scales[i] << endl; imagedata simg = idata; if (scales[i]!=1) { scalinginfo si(simg.width(), simg.height(), (int)floor(scales[i]*simg.width()+0.5), (int)floor(scales[i]*simg.height()+0.5)); simg.rescale(si, 1); } // VlDsiftFilter *sf = vl_dsift_new(simg.width(), simg.height()); VlDsiftFilter *sf = vl_dsift_new_basic(simg.width(), simg.height(), step, binsize); // opts.scales = logspace(log10(1), log10(.25), 5) ; // void vl_dsift_set_bounds ( VlDsiftFilter * self, // int minX, // int minY, // int maxX, // int maxY // ); // VlDsiftDescriptorGeometry geom = { 8, 4, 4, 0, 0 }; // vl_dsift_set_geometry(sf, &geom); //vl_dsift_set_steps(sf, 3, 3); //vl_dsift_set_window_size(sf, 8); vl_dsift_set_flat_window(sf, flat_window); // aka fast in matlab vector<float> imgvec = simg.get_float(); const float *img_fp = &imgvec[0]; // cout << "IMAGE = " << img_fp[0] << " " << img_fp[1] << " " // << img_fp[2] << " ... 
" << img_fp[41] << endl; vl_dsift_process(sf, img_fp); // if opts.rootSift // false // descrs{si} = sqrt(descrs{si}) ; // end // if opts.normalizeSift //true // descrs{si} = snorm(descrs{si}) ; // end descr_size_orig = sf->descrSize; size_t nf = sf->numFrames; const VlDsiftKeypoint *k = sf->frames; float *d = sf->descrs; if (KeyPointVerbose()) cout << " found " << sf->numFrames << " 'frames' in " << simg.info() << endl << " descriptor dim " << descr_size_orig << endl; if (PixelVerbose()) for (size_t i=0; i<nf; i++) { cout << " i=" << i << " x=" << k[i].x << " y=" << k[i].y << " s=" << k[i].s << " norm=" << k[i].norm; if (FullVerbose()) { cout << " RAW"; for (size_t j=0; j<descr_size_orig; j++) cout << " " << d[i*descr_size_orig+j]; } cout << endl; } if (normalizeSift) { for (size_t i=0; i<nf; i++) { if (PixelVerbose()) cout << " i=" << i << " x=" << k[i].x << " y=" << k[i].y << " s=" << k[i].s << " norm=" << k[i].norm; double mul = 0.0; for (size_t j=0; j<descr_size_orig; j++) mul += d[i*descr_size_orig+j]*d[i*descr_size_orig+j]; if (mul) mul = 1.0/sqrt(mul); if (FullVerbose()) cout << " NORM"; for (size_t j=0; j<descr_size_orig; j++) { d[i*descr_size_orig+j] *= mul; if (FullVerbose()) cout << " " << d[i*descr_size_orig+j]; } if (PixelVerbose()) cout << endl; } } if (!pca.vector_length()) { dsift.insert(dsift.end(), d, d+nf*descr_size_orig); descr_size_final = descr_size_orig; } else { for (size_t i=0; i<nf; i++) { vector<float> vin(d+i*descr_size_orig, d+(i+1)*descr_size_orig); vector<float> vout = pca.projection_coeff(vin); dsift.insert(dsift.end(), vout.begin(), vout.end()); } descr_size_final = pca.base_size(); } vl_dsift_delete(sf); } size_t numdata = dsift.size()/descr_size_final; const float *datain = &dsift[0]; vector<float> enc((do_fisher?2:1)*descriptor_dim()*nclusters()); float *dataout = &enc[0]; if (do_fisher) { if (FrameVerbose()) cout << msg << "fisher encoding " << numdata << " descriptors of size " << descr_size_orig << " => " << descr_size_final << " with gmm dimensionality " << descriptor_dim() << endl; if (descr_size_final!=descriptor_dim()) { cerr << msg << "dimensionality mismatch descr_size_final=" << descr_size_final << " descriptor_dim()=" << descriptor_dim() << endl; return false; } vl_fisher_encode(dataout, VL_TYPE_FLOAT, means(), descriptor_dim(), nclusters(), covariances(), priors(), datain, numdata, VL_FISHER_FLAG_IMPROVED) ; } if (do_vlad) { //obs! correct use of pca? if (FrameVerbose()) cout << msg << "vlad encoding " << numdata << " descriptors of size " << descr_size_final << endl; vector<vl_uint32> indexes(numdata); vector<float> distances(numdata); if (kdtree) vl_kdforest_query_with_array(kdtree, &indexes[0], 1, numdata, &distances[0], datain); else vl_kmeans_quantize(kmeans, &indexes[0], &distances[0], datain, numdata); vector<float> assignments(numdata*nclusters()); for (size_t i=0; i<numdata; i++) assignments[i * nclusters() + indexes[i]] = 1; int vlad_flags = VL_VLAD_FLAG_SQUARE_ROOT|VL_VLAD_FLAG_NORMALIZE_COMPONENTS; vl_vlad_encode(dataout, VL_TYPE_FLOAT, means(), descriptor_dim(), nclusters(), datain, numdata, &assignments[0], vlad_flags); } if (renormalize) { if (PixelVerbose()) cout << " RENORM:"; double mul = 0.0; for (size_t j=0; j<enc.size(); j++) mul += enc[j]*enc[j]; if (mul) mul = 1.0/sqrt(mul); for (size_t j=0; j<enc.size(); j++) { if (PixelVerbose()) cout << " " << enc[j]; enc[j] *= mul; if (PixelVerbose()) cout << "->" << enc[j]; } if (PixelVerbose()) cout << endl; } ((VectorData*)GetData(0))->setVector(enc); return true; }
bool testpca(bool silent) { bool result; int passcount; int maxn; int maxm; double threshold; int m; int n; int i; int j; int k; int info; ap::real_1d_array means; ap::real_1d_array s; ap::real_1d_array t2; ap::real_1d_array t3; ap::real_2d_array v; ap::real_2d_array x; double t; double h; double tmean; double tmeans; double tstddev; double tstddevs; double tmean2; double tmeans2; double tstddev2; double tstddevs2; bool pcaconverrors; bool pcaorterrors; bool pcavarerrors; bool pcaopterrors; bool waserrors; // // Primary settings // maxm = 10; maxn = 100; passcount = 1; threshold = 1000*ap::machineepsilon; waserrors = false; pcaconverrors = false; pcaorterrors = false; pcavarerrors = false; pcaopterrors = false; // // Test 1: N random points in M-dimensional space // for(m = 1; m <= maxm; m++) { for(n = 1; n <= maxn; n++) { // // Generate task // x.setbounds(0, n-1, 0, m-1); means.setbounds(0, m-1); for(j = 0; j <= m-1; j++) { means(j) = 1.5*ap::randomreal()-0.75; } for(i = 0; i <= n-1; i++) { for(j = 0; j <= m-1; j++) { x(i,j) = means(j)+(2*ap::randomreal()-1); } } // // Solve // pcabuildbasis(x, n, m, info, s, v); if( info!=1 ) { pcaconverrors = true; continue; } // // Orthogonality test // for(i = 0; i <= m-1; i++) { for(j = 0; j <= m-1; j++) { t = ap::vdotproduct(&v(0, i), v.getstride(), &v(0, j), v.getstride(), ap::vlen(0,m-1)); if( i==j ) { t = t-1; } pcaorterrors = pcaorterrors||ap::fp_greater(fabs(t),threshold); } } // // Variance test // t2.setbounds(0, n-1); for(k = 0; k <= m-1; k++) { for(i = 0; i <= n-1; i++) { t = ap::vdotproduct(&x(i, 0), 1, &v(0, k), v.getstride(), ap::vlen(0,m-1)); t2(i) = t; } calculatemv(t2, n, tmean, tmeans, tstddev, tstddevs); if( n!=1 ) { t = ap::sqr(tstddev)*n/(n-1); } else { t = 0; } pcavarerrors = pcavarerrors||ap::fp_greater(fabs(t-s(k)),threshold); } for(k = 0; k <= m-2; k++) { pcavarerrors = pcavarerrors||ap::fp_less(s(k),s(k+1)); } // // Optimality: different perturbations in V[..,0] can't // increase variance of projection - can only decrease. 
// t2.setbounds(0, n-1); t3.setbounds(0, n-1); for(i = 0; i <= n-1; i++) { t = ap::vdotproduct(&x(i, 0), 1, &v(0, 0), v.getstride(), ap::vlen(0,m-1)); t2(i) = t; } calculatemv(t2, n, tmean, tmeans, tstddev, tstddevs); for(k = 0; k <= 2*m-1; k++) { h = 0.001; if( k%2!=0 ) { h = -h; } ap::vmove(&t3(0), 1, &t2(0), 1, ap::vlen(0,n-1)); ap::vadd(&t3(0), 1, &x(0, k/2), x.getstride(), ap::vlen(0,n-1), h); t = 0; for(j = 0; j <= m-1; j++) { if( j!=k/2 ) { t = t+ap::sqr(v(j,0)); } else { t = t+ap::sqr(v(j,0)+h); } } t = 1/sqrt(t); ap::vmul(&t3(0), 1, ap::vlen(0,n-1), t); calculatemv(t3, n, tmean2, tmeans2, tstddev2, tstddevs2); pcaopterrors = pcaopterrors||ap::fp_greater(tstddev2,tstddev+threshold); } } } // // Special test for N=0 // for(m = 1; m <= maxm; m++) { // // Solve // pcabuildbasis(x, 0, m, info, s, v); if( info!=1 ) { pcaconverrors = true; continue; } // // Orthogonality test // for(i = 0; i <= m-1; i++) { for(j = 0; j <= m-1; j++) { t = ap::vdotproduct(&v(0, i), v.getstride(), &v(0, j), v.getstride(), ap::vlen(0,m-1)); if( i==j ) { t = t-1; } pcaorterrors = pcaorterrors||ap::fp_greater(fabs(t),threshold); } } } // // Final report // waserrors = pcaconverrors||pcaorterrors||pcavarerrors||pcaopterrors; if( !silent ) { printf("PCA TEST\n"); printf("TOTAL RESULTS: "); if( !waserrors ) { printf("OK\n"); } else { printf("FAILED\n"); } printf("* CONVERGENCE "); if( !pcaconverrors ) { printf("OK\n"); } else { printf("FAILED\n"); } printf("* ORTOGONALITY "); if( !pcaorterrors ) { printf("OK\n"); } else { printf("FAILED\n"); } printf("* VARIANCE REPORT "); if( !pcavarerrors ) { printf("OK\n"); } else { printf("FAILED\n"); } printf("* OPTIMALITY "); if( !pcaopterrors ) { printf("OK\n"); } else { printf("FAILED\n"); } if( waserrors ) { printf("TEST SUMMARY: FAILED\n"); } else { printf("TEST SUMMARY: PASSED\n"); } printf("\n\n"); } result = !waserrors; return result; }
void PopRepo::print(simulation::Population& pop,simulation::Parameters& par){ //Condensed Report, which is printed on the screen - BEGINNING //Additive Genetic (co)variances Report - BEGINNING cout<<"Genetic Additive (Co)Variance(s) inputed by the user: "******"\t"; } cout<<"\n"; } cout<<"Genetic Additive (Co)Variance(s) per generation: "<<endl; vector<vector<double> >varadd; for(unsigned short h=0;h<pop.getNumGeneration();++h){ vector<double> var(par.getNumTraits(),0); //cout<<"Generation "<<h<<":"<<endl; var=pop.calcGenAdVar(var,h); //for (unsigned short i=0;i<par.getNumTraits();++i){ //cout<<var[i]<<"\t"; //} varadd.push_back(var); //cout<<endl; } for(unsigned short h=0;h<pop.getNumGeneration();++h){ cout<<h<<"\t"; for (unsigned short i=0;i<par.getNumTraits();++i){ cout<<varadd[h][i]<<"\t"; } cout<<endl; } //Additive Genetic (co)variances Report - END //Phenotypic (co)variances Report - BEGINNING cout<<"Phenotypic (Co)Variance(s) per generation: "<<endl; vector<vector<double> >varphen; for(unsigned short h=0;h<pop.getNumGeneration();++h){ vector<double> var(par.getNumTraits(),0); //cout<<"Generation "<<h<<":"<<endl; var=pop.calcGenPhenVar(var,h); //for (unsigned short i=0;i<par.getNumTraits();++i){ //cout<<var[i]<<"\t"; //} varphen.push_back(var); //cout<<endl; } for(unsigned short h=0;h<pop.getNumGeneration();++h){ cout<<h<<"\t"; for (unsigned short i=0;i<par.getNumTraits();++i){ cout<<varphen[h][i]<<"\t"; } cout<<endl; } //Phenotypic (co)variances Report - END //Traits phenotypic means Report - BEGINNING cout<<"Traits' phenotypic means inputed by the user: "******"Trait "<<i+1<<" mean: \n"; cout<<par.getTraitMeans(i)<<"\n"; } vector<double> means(par.getNumTraits(),0); cout<<"Traits phenotypic means per generation: "<<endl; for(unsigned short k=0;k<pop.getNumGeneration();++k){ cout<<"Generation "<<k<<":"<<endl; means=pop.calcGenMean(means,k); for(unsigned short j=0;j<par.getNumTraits();++j){ cout<<means[j]<<"\t"; } cout<<endl; } //Traits phenotypic means Report - END //Heritability report - BEGINNING cout<<"Traits' heritabilities inputed by the user: "******"Trait "<<i+1<<" heritability: \n"; cout<<par.getHeritability(i)<<"\n"; } vector<vector<double> > herit(pop.getNumGeneration(),vector<double>(par.getNumTraits(),0)); cout<<"Traits heritabilities per generation: "<<endl; for(unsigned short k=0;k<pop.getNumGeneration();++k){ cout<<k<<"\t"; for(unsigned short j=0;j<par.getNumTraits();++j){ herit[k][j]=(varadd[k][j]/varphen[k][j]); cout<<herit[k][j]<<"\t"; cout<<endl; } } //Heritability report - END //Alelle Frequency report - BEGINNING // cout<<"Alelle Frequencies inputed by the user: "******"\t"; // } // cout<<"\n"; // cout<<"Alelles Frequecies per generation: "<<endl; // vector<double> afreq; // double fsum=0; // double fmean=0; // for(unsigned short h=0;h<pop.getNumGeneration();++h){ // cout<<"Generation "<<h<<":"<<endl; // afreq=pop.calcGenAlelleFreq(afreq,h); // for (unsigned short i=0;i<afreq.size();++i){ // cout<<afreq[i]<<"\t"; // fsum+=afreq[i]; // } // fmean=fsum/afreq.size(); // cout<<endl; // cout<<"Average Alelle Frequency over the loci: "<<fmean; // cout<<endl; // fsum=0; // } //Alelle Frequency report - END //Condensed Report, which is printed on the screen - END //Complete Report, which is printed to the default file popRepo - BEGINNING //Code goes here //Complete Report, which is printed to the default file popRepo - END }