double standard_deviation(const VectorXd& xx)
{
  const double mean = xx.mean();
  double accum = 0;
  for (int kk = 0, kk_max = xx.rows(); kk < kk_max; kk++)
    accum += (xx(kk) - mean) * (xx(kk) - mean);
  // Sample standard deviation: divide the sum of squared deviations by (n-1)
  // before taking the square root
  return sqrt(accum / (xx.size() - 1));
}
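// A minimal usage sketch of standard_deviation above; the sample values are
// made up for illustration.
#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

int main()
{
  VectorXd samples(4);
  samples << 1.0, 2.0, 3.0, 4.0;
  // The sample standard deviation of {1, 2, 3, 4} is sqrt(5/3), roughly 1.29
  std::cout << standard_deviation(samples) << std::endl;
  return 0;
}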
int main(int argc, char *argv[])
{
  using namespace Eigen;
  using namespace std;
  MatrixXd V;
  MatrixXi F;
  // Load a mesh in OFF format
  igl::readOFF("../shared/cheburashka.off", V, F);
  // Read scalar function values from a file, U: #V by 1
  VectorXd U;
  igl::readDMAT("../shared/cheburashka-scalar.dmat", U);
  // Compute gradient operator: #F*3 by #V
  SparseMatrix<double> G;
  igl::grad(V, F, G);
  // Compute gradient of U: G*U stacks the x components of all face gradients,
  // then the y components, then the z components, so mapping it column-major
  // into an F.rows() by 3 matrix gives one gradient vector per face
  MatrixXd GU = Map<const MatrixXd>((G*U).eval().data(), F.rows(), 3);
  // Compute gradient magnitude
  const VectorXd GU_mag = GU.rowwise().norm();
  igl::viewer::Viewer viewer;
  viewer.data.set_mesh(V, F);
  // Compute pseudocolor for original function
  MatrixXd C;
  igl::jet(U, true, C);
  // Or for gradient magnitude (commented out):
  //igl::jet(GU_mag,true,C);
  viewer.data.set_colors(C);
  // Average edge length divided by average gradient (for scaling)
  const double max_size = igl::avg_edge_length(V, F) / GU_mag.mean();
  // Draw a black segment in direction of gradient at face barycenters
  MatrixXd BC;
  igl::barycenter(V, F, BC);
  const RowVector3d black(0, 0, 0);
  viewer.data.add_edges(BC, BC + max_size*GU, black);
  // Hide wireframe
  viewer.core.show_lines = false;
  viewer.launch();
}
void linear::poisson(const VectorXd& lapl, VectorXd& f)
{
  if (stiffp1.size() == 0) fill_stiff();
  if (mass.size() == 0) fill_mass();
  VectorXd massl = mass * lapl;
  int N = massl.size();
  VectorXd massll = massl.tail(N - 1);
  VectorXd ff = solver_stiffp1.solve(massll);
  f(0) = 0;
  f.tail(N - 1) = ff;
  // zero mean
  f = f.array() - f.mean();
  return;
}
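// What the routine above computes: given per-vertex Laplacian values `lapl`,
// it forms the right-hand side mass*lapl (with `mass` presumably the mass
// matrix and `stiffp1` the P1 stiffness matrix), solves the reduced system
// with the first vertex pinned to 0 to remove the constant null space, and
// finally shifts the solution f so that it has zero mean.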
void UpdaterMean::costsToWeights(const VectorXd& costs, string weighting_method,
                                 double eliteness, VectorXd& weights) const
{
  weights.resize(costs.size());

  if (weighting_method.compare("PI-BB") == 0)
  {
    // PI^2 style weighting: continuous, cost exponentiation
    double h = eliteness;  // In PI^2, the eliteness parameter is known as "h"
    double range = costs.maxCoeff() - costs.minCoeff();
    if (range == 0)
      weights.fill(1);
    else
      weights = (-h * (costs.array() - costs.minCoeff()) / range).exp();
  }
  else if (weighting_method.compare("CMA-ES") == 0 || weighting_method.compare("CEM") == 0)
  {
    // CMA-ES and CEM are rank-based, so we must first sort the costs, and then
    // assign a weight to each rank.
    VectorXd costs_sorted = costs;
    std::sort(costs_sorted.data(), costs_sorted.data() + costs_sorted.size());
    // In Python this is more elegant because we have argsort:
    //   indices = np.argsort(costs)
    // It is possible to do this with fancy lambda functions or std::pair in
    // C++ too, but I don't mind writing two for loops instead ;-)
    weights.fill(0.0);
    int mu = eliteness;  // In CMA-ES, the eliteness parameter is known as "mu"
    assert(mu < costs.size());
    for (int ii = 0; ii < mu; ii++)
    {
      double cur_cost = costs_sorted[ii];
      for (int jj = 0; jj < costs.size(); jj++)
      {
        if (costs[jj] == cur_cost)
        {
          if (weighting_method.compare("CEM") == 0)
            weights[jj] = 1.0/mu;                      // CEM
          else
            weights[jj] = log(mu + 0.5) - log(ii + 1); // CMA-ES
          break;
        }
      }
    }
    // For debugging
    //MatrixXd print_mat(3,costs.size());
    //print_mat.row(0) = costs_sorted;
    //print_mat.row(1) = costs;
    //print_mat.row(2) = weights;
    //cout << print_mat << endl;
  }
  else
  {
    cout << __FILE__ << ":" << __LINE__ << ":WARNING: Unknown weighting method '"
         << weighting_method << "'. Calling with PI-BB weighting." << endl;
    costsToWeights(costs, "PI-BB", eliteness, weights);
    return;
  }

  // Relative standard deviation of the weights
  double mean = weights.mean();
  double std = sqrt((weights.array() - mean).pow(2).mean());
  double rel_std = std/mean;
  if (rel_std < 1e-10)
  {
    // Special case: all costs are the same
    // Set the same weight for all.
    weights.fill(1);
  }

  // Normalize weights
  weights = weights/weights.sum();
}
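// A minimal usage sketch of costsToWeights above; the UpdaterMean object
// (here `updater`) is assumed to have been constructed elsewhere, and the
// cost values are made up for illustration.
VectorXd costs(5);
costs << 3.0, 1.0, 4.0, 1.5, 9.0;
VectorXd weights;
updater.costsToWeights(costs, "PI-BB", /*eliteness=*/10.0, weights);
// With PI-BB weighting the lowest cost maps to exp(0) and the highest to
// exp(-h) before normalization, so after the call the weights sum to 1 and
// decrease with increasing cost.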
double DataPackage::calculateRowMean(const VectorXd &dataRow) { return dataRow.mean(); }
Matrix3d msac( const Eigen::Matrix2Xd& pointsFrom, const Eigen::Matrix2Xd& pointsTo,
               int maxNumTrials, double confidence, double maxDistance )
{
  double threshold = maxDistance;
  int numPts = pointsFrom.cols();
  int idxTrial = 1;
  int numTrials = maxNumTrials;
  double maxDis = threshold * numPts;
  double bestDist = maxDis;
  Matrix3d bestT;
  bestT << 1, 0, 0,
           0, 1, 0,
           0, 0, 1;
  int index1;
  int index2;
  // Get two random, different numbers in [0:pointsFrom.cols()-1]
  std::uniform_int_distribution<int> distribution1( 0, pointsFrom.cols()-1 );
  std::uniform_int_distribution<int> distribution2( 0, pointsFrom.cols()-2 );

  while ( idxTrial <= numTrials )
  {
    // Get two random, different numbers in [0:pointsFrom.cols()-1]
    index1 = distribution1( msacGenerator );
    index2 = distribution2( msacGenerator );
    if ( index2 >= index1 )
      index2++;
    Vector2d indices( index1, index2 );
    /*std::cout << "indices: " << indices.transpose()
              << " pointsFrom.cols: " << pointsFrom.cols()
              << " pointsTo.cols: " << pointsTo.cols() << std::endl;*/
    // Get the transform calculated from this set of points
    Matrix3d T = computeTform( pointsFrom, pointsTo, indices );
    VectorXd dis = evaluateTform( pointsFrom, pointsTo, T, threshold );
    double accDis = dis.sum();
    if ( accDis < bestDist )
    {
      bestDist = accDis;
      bestT = T;
    }
    idxTrial++;
  }

  VectorXd dis = evaluateTform( pointsFrom, pointsTo, bestT, threshold );
  threshold *= dis.mean();
  int numInliers = 0;
  for ( int i = 0; i < dis.rows(); i++ )
  {
    if ( dis(i) < threshold )
      numInliers++;
  }
  VectorXd inliers( numInliers );
  int j = 0;
  for ( int i = 0; i < dis.rows(); i++ )
  {
    if ( dis(i) < threshold )
      inliers(j++) = i;
  }
  Matrix3d T;
  if ( numInliers >= 2 )
    T = computeTform( pointsFrom, pointsTo, inliers );
  else
    T << 1, 0, 0,
         0, 1, 0,
         0, 0, 1;
  return T;
}
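// A hypothetical usage sketch of msac above. computeTform, evaluateTform and
// the msacGenerator random engine are assumed to be defined elsewhere in the
// same code base; the point sets below are made up for illustration.
Eigen::Matrix2Xd from(2, 4), to(2, 4);
from << 0, 1, 1, 0,
        0, 0, 1, 1;
to   << 1, 2, 2, 1,
        1, 1, 2, 2;  // the same square translated by (1, 1)
Matrix3d T = msac(from, to, /*maxNumTrials=*/200, /*confidence=*/99.0,
                  /*maxDistance=*/1.5);
// T approximates the homogeneous 2D transform mapping `from` onto `to`.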
// barebones version of the lasso for fixed lambda
// Eigen library is used for linear algebra
// x .............. predictor matrix
// y .............. response
// lambda ......... penalty parameter
// useSubset ...... logical indicating whether lasso should be computed on a
//                  subset
// subset ......... indices of subset on which lasso should be computed
// normalize ...... logical indicating whether predictors should be normalized
// useIntercept ... logical indicating whether intercept should be included
// eps ............ small numerical value (effective zero)
// useGram ........ logical indicating whether Gram matrix should be computed
//                  in advance
// useCrit ........ logical indicating whether to compute objective function
void fastLasso(const MatrixXd& x, const VectorXd& y, const double& lambda,
    const bool& useSubset, const VectorXi& subset, const bool& normalize,
    const bool& useIntercept, const double& eps, const bool& useGram,
    const bool& useCrit,
    // intercept, coefficients, residuals and objective function are returned
    // through the following parameters
    double& intercept, VectorXd& beta, VectorXd& residuals, double& crit)
{
  // data initializations
  int n, p = x.cols();
  MatrixXd xs;
  VectorXd ys;
  if(useSubset) {
    n = subset.size();
    xs.resize(n, p);
    ys.resize(n);
    int s;
    for(int i = 0; i < n; i++) {
      s = subset(i);
      xs.row(i) = x.row(s);
      ys(i) = y(s);
    }
  } else {
    n = x.rows();
    xs = x;  // does this copy memory?
    ys = y;  // does this copy memory?
  }
  double rescaledLambda = n * lambda / 2;

  // center data and store means
  RowVectorXd meanX;
  double meanY;
  if(useIntercept) {
    meanX = xs.colwise().mean();  // columnwise means of predictors
    xs.rowwise() -= meanX;        // sweep out columnwise means
    meanY = ys.mean();            // mean of response
    for(int i = 0; i < n; i++) {
      ys(i) -= meanY;             // sweep out mean
    }
  } else {
    meanY = 0;  // just to avoid warning, this is never used
    // intercept = 0;  // zero intercept
  }

  // some initializations
  VectorXi inactive(p);  // inactive predictors
  int m = 0;             // number of inactive predictors
  VectorXi ignores;      // indicates variables to be ignored
  int s = 0;             // number of ignored variables

  // normalize predictors and store norms
  RowVectorXd normX;
  if(normalize) {
    normX = xs.colwise().norm();     // columnwise norms
    double epsNorm = eps * sqrt(n);  // R package 'lars' uses n, not n-1
    for(int j = 0; j < p; j++) {
      if(normX(j) < epsNorm) {
        // variance is too small: ignore variable
        ignores.append(j, s);
        s++;
        // set norm to tolerance to avoid numerical problems
        normX(j) = epsNorm;
      } else {
        inactive(m) = j;  // add variable to inactive set
        m++;              // increase number of inactive variables
      }
      xs.col(j) /= normX(j);  // sweep out norm
    }
    // resize inactive set and update number of variables if necessary
    if(m < p) {
      inactive.conservativeResize(m);
      p = m;
    }
  } else {
    for(int j = 0; j < p; j++) inactive(j) = j;  // add variable to inactive set
    m = p;
  }

  // compute Gram matrix if requested (saves time if number of variables is
  // not too large)
  MatrixXd Gram;
  if(useGram) {
    Gram.noalias() = xs.transpose() * xs;
  }

  // further initializations for iterative steps
  RowVectorXd corY;
  corY.noalias() = ys.transpose() * xs;  // current correlations
  beta.resize(p+s);                      // final coefficients

  // compute lasso solution
  if(p == 1) {
    // special case of only one variable (with sufficiently large norm)
    int j = inactive(0);
    // set maximum step size in the direction of that variable
    double maxStep = corY(j);
    if(maxStep < 0) maxStep = -maxStep;  // absolute value
    // compute coefficients for least squares solution
    VectorXd betaLS = xs.col(j).householderQr().solve(ys);
    // compute lasso coefficients
    beta.setZero();
    if(rescaledLambda < maxStep) {
      // interpolate towards least squares solution
      beta(j) = (maxStep - rescaledLambda) * betaLS(0) / maxStep;
    }
  } else {
    // further initializations for iterative steps
    VectorXi active;  // active predictors
    int k = 0;        // number of active predictors
    // previous and current regression coefficients
    VectorXd previousBeta = VectorXd::Zero(p+s), currentBeta = VectorXd::Zero(p+s);
    // previous and current penalty parameter
    double previousLambda = 0, currentLambda = 0;
    // indicates variables to be dropped
    VectorXi drops;
    // keep track of sign of correlations for the active variables
    // (double precision is necessary for solve())
    VectorXd signs;
    // Cholesky L of Gram matrix of active variables
    MatrixXd L;
    int rank = 0;  // rank of Cholesky L
    // maximum number of variables to be sequenced
    int maxActive = findMaxActive(n, p, useIntercept);

    // modified LARS algorithm for lasso solution
    while(k < maxActive) {
      // extract current correlations of inactive variables
      VectorXd corInactiveY(m);
      for(int j = 0; j < m; j++) {
        corInactiveY(j) = corY(inactive(j));
      }
      // compute absolute values of correlations and find maximum
      VectorXd absCorInactiveY = corInactiveY.cwiseAbs();
      double maxCor = absCorInactiveY.maxCoeff();
      // update current lambda
      if(k == 0) {  // no active variables
        previousLambda = maxCor;
      } else {
        previousLambda = currentLambda;
      }
      currentLambda = maxCor;
      if(currentLambda <= rescaledLambda) break;

      if(drops.size() == 0) {
        // new active variables
        VectorXi newActive = findNewActive(absCorInactiveY, maxCor - eps);
        // do calculations for new active variables
        for(int j = 0; j < newActive.size(); j++) {
          // update Cholesky L of Gram matrix of active variables
          // TODO: put this into void function
          int newJ = inactive(newActive(j));
          VectorXd xNewJ;
          double newX;
          if(useGram) {
            newX = Gram(newJ, newJ);
          } else {
            xNewJ = xs.col(newJ);
            newX = xNewJ.squaredNorm();
          }
          double normNewX = sqrt(newX);
          if(k == 0) {
            // no active variables, L is empty
            L.resize(1,1);
            L(0, 0) = normNewX;
            rank = 1;
          } else {
            VectorXd oldX(k);
            if(useGram) {
              for(int j = 0; j < k; j++) {
                oldX(j) = Gram(active(j), newJ);
              }
            } else {
              for(int j = 0; j < k; j++) {
                oldX(j) = xNewJ.dot(xs.col(active(j)));
              }
            }
            VectorXd l = L.triangularView<Lower>().solve(oldX);
            double lkk = newX - l.squaredNorm();
            // check if L is machine singular
            if(lkk > eps) {
              // no singularity: update Cholesky L
              lkk = sqrt(lkk);
              rank++;
              // add new row and column to Cholesky L
              // this is a little trick: sometimes we need
              // lower triangular matrix, sometimes upper
              // hence we define quadratic matrix and use
              // triangularView() to interpret matrix the
              // correct way
              L.conservativeResize(k+1, k+1);
              for(int j = 0; j < k; j++) {
                L(k, j) = l(j);
                L(j, k) = l(j);
              }
              L(k,k) = lkk;
            }
          }
          // add new variable to active set or drop it for good
          // in case of singularity
          if(rank == k) {
            // singularity: drop variable for good
            ignores.append(newJ, s);
            s++;  // increase number of ignored variables
            p--;  // decrease number of variables
            if(p < maxActive) {
              // adjust maximum number of active variables
              maxActive = p;
            }
          } else {
            // no singularity: add variable to active set
            active.append(newJ, k);
            // keep track of sign of correlation for new active variable
            signs.append(sign(corY(newJ)), k);
            k++;  // increase number of active variables
          }
        }
        // remove new active or ignored variables from inactive variables
        // and corresponding vector of current correlations
        inactive.remove(newActive);
        corInactiveY.remove(newActive);
        m = inactive.size();  // update number of inactive variables
      }

      // prepare for computation of step size
      // here double precision of signs is necessary
      VectorXd b = L.triangularView<Lower>().solve(signs);
      VectorXd G = L.triangularView<Upper>().solve(b);
      // correlations of active variables with equiangular vector
      double corActiveU = 1/sqrt(G.dot(signs));
      // coefficients of active variables in linear combination forming the
      // equiangular vector
      VectorXd w = G * corActiveU;  // note that this has the right signs
      // equiangular vector
      VectorXd u;
      if(!useGram) {
        // we only need equiangular vector if we don't use the precomputed
        // Gram matrix, otherwise we can compute the correlations directly
        // from the Gram matrix
        u = VectorXd::Zero(n);
        for(int i = 0; i < n; i++) {
          for(int j = 0; j < k; j++) {
            u(i) += xs(i, active(j)) * w(j);
          }
        }
      }

      // compute step size in equiangular direction
      double step;
      if(k < maxActive) {
        // correlations of inactive variables with equiangular vector
        VectorXd corInactiveU(m);
        if(useGram) {
          for(int j = 0; j < m; j++) {
            corInactiveU(j) = 0;
            for(int i = 0; i < k; i++) {
              corInactiveU(j) += w(i) * Gram(active(i), inactive(j));
            }
          }
        } else {
          for(int j = 0; j < m; j++) {
            corInactiveU(j) = u.dot(xs.col(inactive(j)));
          }
        }
        // compute step size in the direction of the equiangular vector
        step = findStep(maxCor, corInactiveY, corActiveU, corInactiveU, eps);
      } else {
        // last step: take maximum possible step
        step = maxCor/corActiveU;
      }

      // adjust step size if any sign changes and drop corresponding variables
      drops = findDrops(currentBeta, active, w, eps, step);

      // update current regression coefficients
      previousBeta = currentBeta;
      for(int j = 0; j < k; j++) {
        currentBeta(active(j)) += step * w(j);
      }

      // update current correlations
      if(useGram) {
        // we also need to do this for active variables, since they may be
        // dropped at a later stage
        // TODO: computing a vector step * w in advance may save some computation time
        for(int j = 0; j < Gram.cols(); j++) {
          for(int i = 0; i < k; i++) {
            corY(j) -= step * w(i) * Gram(active(i), j);
          }
        }
      } else {
        ys -= step * u;                        // take step in equiangular direction
        corY.noalias() = ys.transpose() * xs;  // update correlations
      }

      // drop variables if necessary
      if(drops.size() > 0) {
        // downdate Cholesky L
        // TODO: put this into void function
        for(int j = drops.size()-1; j >= 0; j--) {
          // variables need to be dropped in descending order
          int drop = drops(j);  // index with respect to active set
          // modify upper triangular part as in R package 'lars'
          // simply deleting columns is not enough, other computations
          // necessary but complicated due to Fortran code
          L.removeCol(drop);
          VectorXd z = VectorXd::Constant(k, 1, 1);
          k--;  // decrease number of active variables
          for(int i = drop; i < k; i++) {
            double a = L(i,i), b = L(i+1,i);
            if(b != 0.0) {
              // compute the rotation
              double tau, s, c;
              if(std::abs(b) > std::abs(a)) {
                tau = -a/b;
                s = 1.0/sqrt(1.0+tau*tau);
                c = s * tau;
              } else {
                tau = -b/a;
                c = 1.0/sqrt(1.0+tau*tau);
                s = c * tau;
              }
              // update 'L' and 'z'
              L(i,i) = c*a - s*b;
              for(int j = i+1; j < k; j++) {
                a = L(i,j);
                b = L(i+1,j);
                L(i,j) = c*a - s*b;
                L(i+1,j) = s*a + c*b;
              }
              a = z(i);
              b = z(i+1);
              z(i) = c*a - s*b;
              z(i+1) = s*a + c*b;
            }
          }
          // TODO: removing all rows together may save some computation time
          L.conservativeResize(k, NoChange);
          rank--;
        }
        // mirror lower triangular part
        for(int j = 0; j < k; j++) {
          for(int i = j+1; i < k; i++) {
            L(i,j) = L(j,i);
          }
        }
        // add dropped variables to inactive set and make sure
        // coefficients are 0
        inactive.conservativeResize(m + drops.size());
        for(int j = 0; j < drops.size(); j++) {
          int newInactive = active(drops(j));
          inactive(m + j) = newInactive;
          currentBeta(newInactive) = 0;  // make sure coefficient is 0
        }
        m = inactive.size();  // update number of inactive variables
        // drop variables from active set and sign vector
        // number of active variables is already updated above
        active.remove(drops);
        signs.remove(drops);
      }
    }

    // interpolate coefficients for given lambda
    if(k == 0) {
      // lambda larger than largest lambda from steps of LARS algorithm
      beta.setZero();
    } else {
      // penalty parameter within two steps
      if(k == maxActive) {
        // current coefficients are the least squares solution (in the
        // high-dimensional case, as far along the solution path as possible)
        // current and previous values of the penalty parameter need to be
        // reset for interpolation
        previousLambda = currentLambda;
        currentLambda = 0;
      }
      // interpolate coefficients
      beta = ((rescaledLambda - currentLambda) * previousBeta +
              (previousLambda - rescaledLambda) * currentBeta) /
             (previousLambda - currentLambda);
    }
  }

  // transform coefficients back
  VectorXd normedBeta;
  if(normalize) {
    if(useCrit) normedBeta = beta;
    for(int j = 0; j < p; j++) beta(j) /= normX(j);
  }
  if(useIntercept) intercept = meanY - beta.dot(meanX);

  // compute residuals for all observations
  n = x.rows();
  residuals = y - x * beta;
  if(useIntercept) {
    for(int i = 0; i < n; i++) residuals(i) -= intercept;
  }

  // compute value of objective function on the subset
  if(useCrit && useSubset) {
    if(normalize) crit = objective(normedBeta, residuals, subset, lambda);
    else crit = objective(beta, residuals, subset, lambda);
  }
}
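// A minimal usage sketch of fastLasso above; the data and parameter values
// are made up for illustration, and the helper routines it relies on
// (findMaxActive, findNewActive, findStep, findDrops, sign, objective and the
// append/remove/removeCol extensions) must be available from the same code base.
#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd x = MatrixXd::Random(100, 10);  // 100 observations, 10 predictors
  VectorXd y = x.col(0) - 2.0 * x.col(3) + 0.1 * VectorXd::Random(100);
  VectorXi subset;                         // unused since useSubset is false
  double intercept, crit;
  VectorXd beta, residuals;
  fastLasso(x, y, /*lambda=*/0.1, /*useSubset=*/false, subset,
            /*normalize=*/true, /*useIntercept=*/true, /*eps=*/1e-12,
            /*useGram=*/true, /*useCrit=*/false,
            intercept, beta, residuals, crit);
  // beta now holds the lasso coefficients for the given lambda, and
  // residuals the corresponding residuals y - intercept - x*beta.
  return 0;
}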