Example #1
int CLogistic::PredictOnevsAll(const VectorXd& point)
{

	// compute per-class scores for one-vs-all classification
	// (classifier holds one weight vector per class, one row each)
	VectorXd prob = classifier * point;

	// return the index of the class with the highest score
	VectorXd::Index maxIndex;
	prob.maxCoeff(&maxIndex);
	return (int) maxIndex;
}
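A note on the idiom above: maxCoeff() returns the largest coefficient itself, while the overload taking an index pointer also reports where it occurs, which is what a classifier needs. A minimal standalone sketch (names are illustrative):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::VectorXd scores(3);
    scores << 0.2, 1.7, 0.9;

    // Value of the largest coefficient ...
    double best = scores.maxCoeff();          // 1.7

    // ... and its position, via the index-pointer overload.
    Eigen::VectorXd::Index bestIdx;
    scores.maxCoeff(&bestIdx);                // bestIdx == 1

    std::cout << best << " at index " << bestIdx << std::endl;
    return 0;
}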
Example #2
double CGppe::get_fbest(int N)
{
    // collect the latent function values of the N test points (the tail of f)
    VectorXd ftest = f.segment(f.rows() - N, N);
    double fbest = ftest.maxCoeff();
    if (fbest != fbest)     // NaN check: fall back to the global maximum
        fbest = f.maxCoeff();
    return fbest;
}
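The fbest != fbest test above works because IEEE-754 NaN is the only value that compares unequal to itself. A tiny sketch of the equivalent explicit check:

#include <cassert>
#include <cmath>
#include <limits>

int main()
{
    double v = std::numeric_limits<double>::quiet_NaN();
    assert(v != v);         // NaN compares unequal to itself
    assert(std::isnan(v));  // the explicit <cmath> spelling of the same test
    return 0;
}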
Example #3
  // Updates the inference result.
  // Hand position is in mm.
  std::string InfEngine::Update(float* raw_feature) {
    using Eigen::Map;
    using Eigen::VectorXf;
    using Eigen::VectorXd;
    using std::string;

    int gesture_index = 0;
    int handpose_index = 0;
    string stage = "Unknown";

    json_spirit::mObject result;

    if (raw_feature != NULL) {
      int motion_feature_len = feature_len_ - n_principal_comps_;

      Map<VectorXf> des(raw_feature + motion_feature_len, descriptor_len_);
      VectorXf res(n_principal_comps_);
      res.noalias() = principal_comp_ * (des - pca_mean_);

      Map<VectorXf> motion_feature(raw_feature, motion_feature_len);
      VectorXf full_feature(feature_len_);
      full_feature << motion_feature, res;
      // Normalize feature.
      full_feature = (full_feature - std_mu_).cwiseQuotient(std_sigma_);

      if (svm_classifier_) {
        VectorXd prob = svm_classifier_->Predict(full_feature);
        VectorXd::Index max_index;
        prob.maxCoeff(&max_index);
        handpose_index = (int) max_index;
      }

      if (hmm_) {
        hmm_->Fwdback(full_feature);
        gesture_index = hmm_->MostLikelyLabelIndex();
        stage = hmm_->MostLikelyStage();
      }

      // Hand position is converted to millimeters for the result.
      result["rightX"] = (int) (motion_feature[0] * 1000);
      result["rightY"] = (int) (motion_feature[1] * 1000);
    }

    const string& gesture_label = gesture_labels_[gesture_index];
    gesture_event_detector_.Detect(gesture_label, stage, &result);
    std::string s = write(result, json_spirit::pretty_print | json_spirit::raw_utf8);
    return s;
  }
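Two Eigen facilities carry this function: Eigen::Map, which views a raw float buffer as a vector without copying, and the comma initializer, which concatenates the motion and descriptor parts. A minimal sketch of that pattern (buffer contents are made up):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    using Eigen::Map;
    using Eigen::VectorXf;

    // Raw C buffer, e.g. a feature array handed in by a caller.
    float raw[5] = {1.f, 2.f, 3.f, 4.f, 5.f};

    // View the first 3 and the last 2 entries without copying,
    // mirroring the motion-feature/descriptor split above.
    Map<VectorXf> head(raw, 3);
    Map<VectorXf> tail(raw + 3, 2);

    // Concatenate into one feature vector via the comma initializer.
    VectorXf full(5);
    full << head, tail;

    std::cout << full.transpose() << std::endl;   // 1 2 3 4 5
    return 0;
}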
Example #4
int main(int argc, char *argv[])
{
  using namespace Eigen;
  using namespace std;

  cout<<"Usage:"<<endl;
  cout<<"[space]  toggle showing surface."<<endl;
  cout<<"'.'/','  push back/pull forward slicing plane."<<endl;
  cout<<endl;

  // Load mesh: (V,T) tet-mesh of convex hull, F contains original surface
  // triangles
  igl::readMESH("../shared/bunny.mesh",V,T,F);


  // Encapsulated call to point_mesh_squared_distance to determine bounds
  {
    VectorXd sqrD;
    VectorXi I;
    MatrixXd C;
    igl::point_mesh_squared_distance(V,V,F,sqrD,I,C);
    max_distance = sqrt(sqrD.maxCoeff());
  }

  // Precompute signed distance AABB tree
  tree.init(V,F);
  // Precompute vertex,edge and face normals
  igl::per_face_normals(V,F,FN);
  igl::per_vertex_normals(
    V,F,igl::PER_VERTEX_NORMALS_WEIGHTING_TYPE_ANGLE,FN,VN);
  igl::per_edge_normals(
    V,F,igl::PER_EDGE_NORMALS_WEIGHTING_TYPE_UNIFORM,FN,EN,E,EMAP);

  // Plot the generated mesh
  igl::viewer::Viewer viewer;
  update_visualization(viewer);
  viewer.callback_key_down = &key_down;
  viewer.core.show_lines = false;
  viewer.launch();
}
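The block that computes max_distance works on squared distances: igl::point_mesh_squared_distance fills sqrD with squared point-to-mesh distances, so a single sqrt of the maximum suffices. A small sketch of just that reduction (the sqrD values are made up):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main()
{
    // Pretend these are squared distances as returned into sqrD above.
    Eigen::VectorXd sqrD(4);
    sqrD << 0.0, 0.25, 4.0, 1.0;

    // Take the max first, then one sqrt, instead of sqrt'ing every entry.
    double max_distance = std::sqrt(sqrD.maxCoeff());

    std::cout << max_distance << std::endl;   // prints 2
    return 0;
}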
Example #5
double CGppe::maximum_expected_improvement(const VectorXd & theta_t, const VectorXd& theta_x, const double& sigma,
        const MatrixXd& t, const MatrixXd & x, const VectorXd& idx_global, const VectorXd& ind_t, const VectorXd& ind_x, MatrixXd tstar, int N, double fbest)
{
    VectorXd idx_xstar=Nfirst(N);
    int Kt_ss = 1;
    double  mei;
    MatrixXd Kx_star, Kx_star_star, kstar, Kss, Css;
    MatrixXd Kt_star = covfunc_t->Compute(t, tstar);
	//dsp(GetKinv(),"Kinv");


    Kx_star = GetMatRow(Kx, idx_xstar.transpose()); // maybe need some transpose?

    Kx_star_star = GetMat(Kx, idx_xstar.transpose(), idx_xstar.transpose()); // test-vs-test covariance block
    kstar = Kron(Kt_star, Kx_star);

    kstar = GetMatRow(kstar, idx_global);
    Kss = Kt_ss * Kx_star_star;


    mustar = kstar.transpose() * Kinv * GetVec(f, idx_global);
    Css    = Kss - kstar.transpose() * W * llt.solve(Kinv * kstar);
    varstar = Css.diagonal();


    VectorXd sigmastar = sqrt(varstar.array());
    VectorXd z = (fbest - mustar.array()) / sigmastar.array();
    VectorXd pdfval = normpdf(z);
    VectorXd cdfval = normcdf(z);
    VectorXd inter = z.array() * (1 - cdfval.array());
    VectorXd el = sigmastar.cwiseProduct(inter - pdfval);

    el = -el;
    mei = el.maxCoeff();
    //dsp(mei,"mei");
    return mei;
}
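For reference, assuming normpdf and normcdf are the standard normal density \varphi and CDF \Phi, the quantity maximized above (after the sign flip el = -el) is the expected improvement for maximization:

z_i = \frac{f_{\mathrm{best}} - \mu_i}{\sigma_i}, \qquad
\mathrm{EI}_i = \sigma_i \bigl( \varphi(z_i) - z_i \, (1 - \Phi(z_i)) \bigr), \qquad
\mathrm{mei} = \max_i \mathrm{EI}_i .

Substituting z_i' = -z_i recovers the more familiar form \mathrm{EI}_i = \sigma_i \bigl( z_i' \Phi(z_i') + \varphi(z_i') \bigr).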
Example #6
void  UpdaterMean::costsToWeights(const VectorXd& costs, string weighting_method, double eliteness, VectorXd& weights) const
{
  weights.resize(costs.size());
  if (weighting_method.compare("PI-BB")==0)
  {
    // PI^2 style weighting: continuous, cost exponentiation
    double h = eliteness; // In PI^2, eliteness parameter is known as "h"
    double range = costs.maxCoeff()-costs.minCoeff();
    if (range==0)
      weights.fill(1);
    else
      weights = (-h*(costs.array()-costs.minCoeff())/range).exp();
  } 
  else if (weighting_method.compare("CMA-ES")==0 || weighting_method.compare("CEM")==0 )
  {
    // CMA-ES and CEM are rank-based, so we must first sort the costs, and then assign a weight
    // to each rank.
    VectorXd costs_sorted = costs; 
    std::sort(costs_sorted.data(), costs_sorted.data()+costs_sorted.size());
    // In Python this is more elegant because we have argsort:
    // indices = np.argsort(costs)
    // It is possible to do this with lambda functions in C++ too (see the argsort sketch after
    // this function), but I don't mind writing two for loops instead ;-)
    
    weights.fill(0.0);
    int mu = eliteness; // In CMA-ES, eliteness parameter is known as "mu"
    assert(mu<costs.size());
    for (int ii=0; ii<mu; ii++)
    {
      double cur_cost = costs_sorted[ii];
      for (int jj=0; jj<costs.size(); jj++)
      {
        if (costs[jj] == cur_cost)
        {
          if (weighting_method.compare("CEM")==0)
            weights[jj] = 1.0/mu; // CEM
          else
            weights[jj] = log(mu+0.5) - log(ii+1); // CMA-ES
          break;
        }
      }
      
    }
    // For debugging
    //MatrixXd print_mat(3,costs.size());
    //print_mat.row(0) = costs_sorted;
    //print_mat.row(1) = costs;
    //print_mat.row(2) = weights;
    //cout << print_mat << endl;
  }
  else
  {
    cout << __FILE__ << ":" << __LINE__ << ":WARNING: Unknown weighting method '" << weighting_method << "'. Falling back to PI-BB weighting." << endl;
    costsToWeights(costs, "PI-BB", eliteness, weights);
    return;
  }
  
  // Relative standard deviation of the weights
  double mean = weights.mean();
  double std = sqrt((weights.array()-mean).pow(2).mean());
  double rel_std = std/mean;
  if (rel_std<1e-10)
  {
      // Special case: all costs are the same
      // Set same weights for all.
      weights.fill(1);
  }

  // Normalize weights
  weights = weights/weights.sum();

}
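The comment in the CMA-ES/CEM branch alludes to argsort; for completeness, a compact C++ argsort sketch with std::sort over an index vector, which could replace the two nested loops (not part of the original code):

#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

// Indices that would sort 'values' ascending: the C++ analogue of np.argsort.
std::vector<int> argsort(const std::vector<double>& values)
{
    std::vector<int> indices(values.size());
    std::iota(indices.begin(), indices.end(), 0);   // 0, 1, 2, ...
    std::sort(indices.begin(), indices.end(),
              [&values](int a, int b) { return values[a] < values[b]; });
    return indices;
}

int main()
{
    std::vector<double> costs = {3.0, 1.0, 2.0};
    for (int i : argsort(costs)) std::cout << i << ' ';   // prints: 1 0 2
    std::cout << std::endl;
    return 0;
}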
Example #7
bool ClassLayouter::computeNormally()
{
    FuzzyDependAttr::Ptr fuzzyAttr = m_parent->getAttr<FuzzyDependAttr>();
    if (!fuzzyAttr)
        return false;

    Graph G;
    GraphAttributes GA(G,
                       GraphAttributes::nodeGraphics | GraphAttributes::edgeGraphics |
                       GraphAttributes::nodeLabel    | GraphAttributes::nodeTemplate |
                       GraphAttributes::edgeDoubleWeight);

    SparseMatrix& veMat = fuzzyAttr->vtxEdgeMatrix();
    VectorXd	  edgeWeight	= fuzzyAttr->edgeWeightVector();
    if (edgeWeight.size() != veMat.cols())
    {
        m_status |= WARNING_USE_DEFAULT_EDGE_WEIGHT;
        edgeWeight.setOnes(veMat.cols());
    }
    const int nNodes = veMat.rows();
    const int nEdges = veMat.cols();
    if (nNodes <= 0 || nNodes != m_childList.size() || nEdges < 1)
    {
        m_status |= WARNING_NO_EDGE;
        return false;
    }

    bool isUseOrthoLayout = nEdges < 50;

    vector<node> nodeArray;
    vector<edge> edgeArray;
    NodeArray<float> nodeSize(G);
    EdgeArray<double> edgeLength(G);
    for (int i = 0; i < nNodes; ++i)
    {
        nodeArray.push_back(G.newNode());
        float r = m_nodeRadius[i];
        GA.width(nodeArray.back()) = r*2;
        GA.height(nodeArray.back()) = r*2;
        nodeSize[nodeArray.back()] = r * 2;
    }

    float maxEdgeWeight = edgeWeight.maxCoeff();
    float minEdgeWeight = edgeWeight.minCoeff();
    for (int ithEdge = 0; ithEdge < nEdges; ++ithEdge)
    {
        int src, dst;
        GraphUtility::getVtxFromEdge(veMat, ithEdge, src, dst);
        edgeArray.push_back(G.newEdge(nodeArray[src], nodeArray[dst]));
        //GA.doubleWeight(edgeArray.back()) = edgeWeight[ithEdge];
        edgeLength[edgeArray.back()] = 1;//log(edgeWeight[ithEdge] + 1);
    }


    // add dummy vertices and edges in order to merge parallel edge segments attached to the same node
    VectorXi compVec;
    int nComp = 1;
    const float dummyNodeRadius = 0.01;
    if(m_isMergeEdgeEnd && isUseOrthoLayout && GraphUtility::findConnectedComponentsVE(	veMat, compVec, nComp ))
    {
        bool* isCompSet = new bool[nComp];
        for (int i = 0; i < nComp; ++i)
            isCompSet[i] = false;

        // add a dummy node and edge for each connect component
        for (int ithVtx = 0; ithVtx < compVec.size(); ++ithVtx)
        {
            int ithCmp = compVec[ithVtx];
            if (isCompSet[ithCmp] == false)
            {
                // add dummy node and set its radius
                nodeArray.push_back(G.newNode());
                GA.width(nodeArray.back()) = dummyNodeRadius;
                GA.height(nodeArray.back()) = dummyNodeRadius;
                // add dummy edge
                edgeArray.push_back(G.newEdge(nodeArray[ithVtx], nodeArray.back()));
                isCompSet[ithCmp] = true;
            }
        }
        delete[] isCompSet;
    }


    MatrixXd pos;
    pos.resize(nNodes, 2);
    try
    {
        if (isUseOrthoLayout)
        {
            PlanarizationLayout layouter;
            OrthoLayout *ol = new OrthoLayout;
            float sep = max(m_nodeRadius.sum() / nNodes, LayoutSetting::s_baseRadius * 12);
            ol->separation(sep);
            ol->cOverhang(0.1);
            ol->setOptions(1+4);
            layouter.setPlanarLayouter(ol);
            layouter.call(GA);
            for (int v = 0; v < nNodes; ++v)
            {
                double x = GA.x(nodeArray[v]);
                double y = GA.y(nodeArray[v]);
                pos(v,0) = x;
                pos(v,1) = y;
            }
        }
        else
        {
            FMMMLayout layouter;
            //CircularLayout layouter;
            float avgRadius = m_nodeRadius.sum() / nNodes;
            layouter.unitEdgeLength(avgRadius * 4);
            //layouter.call(GA, edgeLength);
            layouter.call(GA);
            // 	layouter.resizeDrawing(true);
            // 	layouter.resizingScalar(3);

            for (int v = 0; v < nNodes; ++v)
            {
                double x = GA.x(nodeArray[v]);
                double y = GA.y(nodeArray[v]);
                pos(v,0) = x;
                pos(v,1) = y;
            }

            MDSPostProcesser m_postProcessor(5000, 1, 1.0, 1.0, LayoutSetting::s_baseRadius * 2);
            m_postProcessor.set2DPos(pos, m_nodeRadius.cast<double>());
            m_postProcessor.compute();
            m_postProcessor.getFinalPos(pos);
        }
    }
    catch(...)//AlgorithmFailureException e
    {
        return false;
    }

    VectorXd halfSize,offset;
    GeometryUtility::moveToOrigin(pos, m_nodeRadius.cast<double>(), halfSize, &offset);
    m_nodePos = pos.cast<float>();

    // postprocessing, and set edges
    float edgeBound[2] = {(float)halfSize[0], (float)halfSize[1]};
    if (isUseOrthoLayout)
    {
        for(int ithEdge = 0; ithEdge < nEdges; ++ithEdge)
        {
            DPolyline& p = GA.bends(edgeArray[ithEdge]);
            int nPoints = p.size();

            BSpline splineBuilder(5, BSpline::BSPLINE_OPEN_UNIFORM, true);
            int ithPnt = 0;
            for (DPolyline::iterator pLine = p.begin(); pLine != p.end(); ++pLine, ++ithPnt)
            {
                float px = (*pLine).m_x + offset[0];
                float py = (*pLine).m_y + offset[1];
                splineBuilder.addPoint(px,py);
                splineBuilder.addPoint(px,py);
            }
            splineBuilder.computeLine((nPoints) * 5);
            const QList<QPointF>& curve = splineBuilder.getCurvePnts();

            m_edgeParam.push_back(EdgeParam());
            EdgeParam& ep = m_edgeParam.back();
            ep.m_points.resize(curve.size(),2);
            for (int i = 0; i < curve.size(); ++i)
            {
                ep.m_points(i, 0) = curve[i].x();
                ep.m_points(i, 1) = curve[i].y();

                edgeBound[0] = qMax(edgeBound[0], (float)qAbs(curve[i].x()));
                edgeBound[1] = qMax(edgeBound[1], (float)qAbs(curve[i].y()));
            }
            ep.m_weight = edgeWeight[ithEdge];
        }
    }
    else
    {
        DelaunayCore::DelaunayRouter router;
        router.setLaneWidthRatio(2);
        router.setSmoothParam(0.5,2);
        router.setEndPointNormalRatio(0.8);
        computeEdgeRoute(router);
    }

    m_totalRadius = qSqrt(edgeBound[0]*edgeBound[0] + edgeBound[1]*edgeBound[1]);
    return true;
}
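maxEdgeWeight and minEdgeWeight above are computed but unused in the shown path (the doubleWeight assignment is commented out); they are the natural inputs for min-max normalizing the weights before mapping them to pen width or color. A hedged sketch of that normalization (not part of the original layouter):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::VectorXd edgeWeight(4);
    edgeWeight << 1.0, 4.0, 2.5, 4.0;

    const double maxW = edgeWeight.maxCoeff();
    const double minW = edgeWeight.minCoeff();
    const double range = maxW - minW;

    // Map each weight into [0,1]; guard the degenerate all-equal case.
    Eigen::VectorXd normalized;
    if (range > 0.0)
        normalized = ((edgeWeight.array() - minW) / range).matrix();
    else
        normalized = Eigen::VectorXd::Ones(edgeWeight.size());

    std::cout << normalized.transpose() << std::endl;   // 0 1 0.5 1
    return 0;
}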
Example #8
void
AutoSeg::autoSegment()
{
  // Total timing
  double last = getTime ();

  // Step 1: filter the input cloud
  //double last_f = pcl::getTime ();
  SampleFilter filter (input_cloud_);
  filter.setMaxPtLimit (input_task_->getMaxPatchDistance());
  filter.filter();
  filtered_cloud_ = filter.cloud_filtered_;
  //double now_f = pcl::getTime ();
  //cerr << "Time for filter: " << now_f-last_f << endl;

  // Return if no points are valid
  if (!AutoSeg::numValidPoints(filtered_cloud_))
    return;
  
  // Update the viewer
  if (do_vis_ && show_filtered_ && viewer_)
    AutoSeg::showFilteredCloud();

  // Decimation ratio
  float dec_ratio = static_cast<float> (nd) / 
                    (2.0*static_cast<float> (filtered_cloud_->points.size ()));

  // Step 2: find salient points on the filtered cloud
  //double last_sal = pcl::getTime ();
  sal_ = new SampleSaliency;
  
  sal_->setInputCloud (filtered_cloud_);
  sal_->setNNSize (2.0f*radius_, 580.0f/dec_ratio);
  sal_->setNNCAThres (MAX_DON_COS_ANGLE);
  if (use_gravity_) {sal_->setG (g_);}
  if (use_gravity_) {sal_->setNGCAThres (MAX_DONG_COS_ANGLE);}
  sal_->setNMS(do_nms_);
  
  sal_->extractSalientPoints ();
  
  // Keep only MAX_SAL_POINTS random points
  // TBD: do it in sal_
  if ((sal_->getIndxSalient())->size() > MAX_SAL_POINTS)
  {
    random_shuffle((sal_->getIndxSalient())->begin(), (sal_->getIndxSalient())->end());
    (sal_->getIndxSalient())->resize (MAX_SAL_POINTS);
  }
  
  //double now_sal = pcl::getTime ();
  //cerr << "Time for sal: " << now_sal-last_sal << endl;

  // Update viewer with normals
  if (do_vis_ && viewer_)
  {
    sal_->setViewer (viewer_);
    if (show_sal_filtered_) sal_->showDtFPCloud ();
    if (show_fixation_) sal_->showFixation ();
    if (show_salient_) sal_->showSalPoints (true);
    if (show_sal_curvatures_) sal_->showCurvature (true);
    if (show_sal_normals_) sal_->showNormals (true);
  }

  if (do_vis_ && show_patches_ && viewer_) removePatchPlots();

  // Create nn object
  search::OrganizedNeighbor<PointXYZ> search;
  search.setInputCloud(filtered_cloud_);
  
  // For each seed point
  ns = 0; // set number of current seeds to zero
  int num_patches = 0;
  for (int si=0; si<(sal_->getIndxSalient())->size (); si++)
  {
    // generate seeds until some termination condition holds
    //if (AutoSeg::terminate())
    //  break;

    // Step 3: generate and validate a new seed
    seed = (sal_->getIndxSalient())->at(si);
    if (!isSeedValid())
      continue;
    seeds.push_back(seed);
    ns++;

    // Step 4: fetch and validate neighborhood
    //double last_nn = pcl::getTime ();
    search.radiusSearch(filtered_cloud_->points[seed], radius_, nn_indices, nn_sqr_distances);
    nn_cloud_->points.resize (0);
    for (int i=0; i<nn_indices.size(); i++)
      nn_cloud_->points.push_back(filtered_cloud_->points[nn_indices[i]]);
    //double now_nn = pcl::getTime ();
    //cerr << "Time for nn: " << now_nn-last_nn << endl;
   
    if (do_vis_ && show_nn_ && viewer_)
      AutoSeg::showNN();

    // Step 5: fit the patch
    //double last_pf = pcl::getTime ();
    PatchFit *patchFit = new PatchFit(nn_cloud_);
    patchFit->setSSmax (50);
    patchFit->fit();
    //double now_pf = pcl::getTime ();
    //cerr << "Time for fitting " << nn_cloud_->points.size() <<
    //        " points: " << now_pf-last_pf << endl;
  

    // Step 6: validate patch
    //double last_val = pcl::getTime ();
    if (do_validation_)
    {
      (*patchFit->p_).computeResidual (nn_cloud_);
      if ((*patchFit->p_).getResidual() > t_residual_)
      {
        delete patchFit; // avoid leaking the fitter when the patch is rejected
        continue;
      }

      VectorXd k = (*patchFit->p_).getK ();
      if (k.minCoeff () < t_min_curv_ || k.maxCoeff () > t_max_curv_)
      {
        delete patchFit;
        continue;
      }
    }
    num_patches++;
    //double now_val = pcl::getTime ();
    //cerr << "Time for validation: " << now_val-last_val << endl;

    // Visualize patch
    //double last_pp = pcl::getTime ();
    if (do_vis_ && show_patches_ && viewer_)
    {
      PatchPlot *patch_plot;
      (*patchFit->p_).setID (ns);
      (*patchFit->p_).setSS (0.025);
      (*patchFit->p_).infer_params();
      (*patchFit->p_).gs();

      //PatchPlot patch_plot (*patchFit->p_);
      patch_plot = new PatchPlot (*patchFit->p_);
    
      patch_plot->showPatch (viewer_, 0, 1, 0, boost::to_string(ns) + "_patch");
    
      delete (patch_plot);
    }
    //double now_pp = pcl::getTime ();
    //cerr << "Time for plotting patch: " << now_pp-last_pp << endl;

    if (!no_stats_)
    {
      // Save normal vector
      (*patchFit->p_).setCnn (sal_->getNormalNormalAngle (seed));
      if (use_gravity_)
        (*patchFit->p_).setCng (sal_->getNormalGravityAngle (seed));

      // Step 7: save statistics
      AutoSeg::saveStat (*patchFit->p_);
    
      // Step 8: print statistics
      Vector2d sk = (*patchFit->p_).getSK();
      cout << "patch stats" << " "
           << sk (0) << " "
           << sk (1) << " "
           << (*patchFit->p_).getCnn () << " "
           << (*patchFit->p_).getCng () << endl;
    }
    // delete objects
    delete (patchFit);

  } // for each seed

  if(do_vis_ && show_patches_) max_patch_plot_id_ = ns-1;
  else max_patch_plot_id_ = 0;

  // Total timing
  double now = getTime();
  cerr << "Total autoseg for " << num_patches << " patches out of " << ns <<
          " seed points: " << now-last << " sec." << endl;

  // destroy objects
  delete (sal_);
}
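Step 6 rejects a patch whose principal curvatures leave the band [t_min_curv_, t_max_curv_], using minCoeff/maxCoeff as a vectorized bounds check. The same test in isolation (thresholds are illustrative):

#include <Eigen/Dense>
#include <iostream>

// True when every curvature lies within [minCurv, maxCurv],
// mirroring the patch-validation test in Step 6 above.
bool curvaturesInBounds(const Eigen::VectorXd& k, double minCurv, double maxCurv)
{
    return k.minCoeff() >= minCurv && k.maxCoeff() <= maxCurv;
}

int main()
{
    Eigen::VectorXd k(2);
    k << -0.1, 0.3;
    std::cout << std::boolalpha
              << curvaturesInBounds(k, -0.5, 0.5) << std::endl;   // true
    return 0;
}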
Example #9
// barebones version of the lasso for fixed lambda
// Eigen library is used for linear algebra
// x .............. predictor matrix
// y .............. response
// lambda ......... penalty parameter
// useSubset ...... logical indicating whether lasso should be computed on a
//                  subset
// subset ......... indices of subset on which lasso should be computed
// normalize ...... logical indicating whether predictors should be normalized
// useIntercept ... logical indicating whether intercept should be included
// eps ............ small numerical value (effective zero)
// useGram ........ logical indicating whether Gram matrix should be computed
//                  in advance
// useCrit ........ logical indicating whether to compute objective function
void fastLasso(const MatrixXd& x, const VectorXd& y, const double& lambda,
		const bool& useSubset, const VectorXi& subset, const bool& normalize, 
    const bool& useIntercept, const double& eps, const bool& useGram, 
    const bool& useCrit,
    // intercept, coefficients, residuals and objective function are returned 
    // through the following parameters
    double& intercept, VectorXd& beta, VectorXd& residuals, double& crit) {

	// data initializations
	int n, p = x.cols();
	MatrixXd xs;
	VectorXd ys;
	if(useSubset) {
		n = subset.size();
		xs.resize(n, p);
		ys.resize(n);
		int s;
		for(int i = 0; i < n; i++) {
			s = subset(i);
			xs.row(i) = x.row(s);
			ys(i) = y(s);
		}
	} else {
		n = x.rows();
		xs = x;	// does this copy memory?
		ys = y;	// does this copy memory?
	}
	double rescaledLambda = n * lambda / 2;

	// center data and store means
	RowVectorXd meanX;
	double meanY;
	if(useIntercept) {
		meanX = xs.colwise().mean();	// columnwise means of predictors
		xs.rowwise() -= meanX;			// sweep out columnwise means
		meanY = ys.mean();				// mean of response
		for(int i = 0; i < n; i++) {
			ys(i) -= meanY;				// sweep out mean
		}
	} else {
		meanY = 0;		// just to avoid warning, this is never used
//		intercept = 0;	// zero intercept
	}

	// some initializations
	VectorXi inactive(p);	// inactive predictors
	int m = 0;				// number of inactive predictors
	VectorXi ignores;		// indicates variables to be ignored
	int s = 0;				// number of ignored variables

	// normalize predictors and store norms
	RowVectorXd normX;
  if(normalize) {
    normX = xs.colwise().norm();	// columnwise norms
	  double epsNorm = eps * sqrt(n);	// R package 'lars' uses n, not n-1
	  for(int j = 0; j < p; j++) {
		  if(normX(j) < epsNorm) {
			  // variance is too small: ignore variable
			  ignores.append(j, s);
			  s++;
			  // set norm to tolerance to avoid numerical problems
			  normX(j) = epsNorm;
		  } else {
			  inactive(m) = j;		// add variable to inactive set
			  m++;					// increase number of inactive variables
		  }
		  xs.col(j) /= normX(j);		// sweep out norm
	  }
	  // resize inactive set and update number of variables if necessary
	  if(m < p) {
		  inactive.conservativeResize(m);
		  p = m;
	  }
  } else {
    for(int j = 0; j < p; j++) inactive(j) = j;  // add variable to inactive set
    m = p;
  }

	// compute Gram matrix if requested (saves time if number of variables is
	// not too large)
	MatrixXd Gram;
	if(useGram) {
		Gram.noalias() = xs.transpose() * xs;
	}

	// further initializations for iterative steps
  RowVectorXd corY;
  corY.noalias() = ys.transpose() * xs; // current correlations
  beta.resize(p+s);                     // final coefficients

  // compute lasso solution
  if(p == 1) {

    // special case of only one variable (with sufficiently large norm)
    int j = inactive(0);          
    // set maximum step size in the direction of that variable
    double maxStep = corY(j);
    if(maxStep < 0) maxStep = -maxStep; // absolute value
    // compute coefficients for least squares solution
    VectorXd betaLS = xs.col(j).householderQr().solve(ys);
    // compute lasso coefficients
    beta.setZero();
    if(rescaledLambda < maxStep) {
      // interpolate towards least squares solution
      beta(j) = (maxStep - rescaledLambda) * betaLS(0) / maxStep;
    }

  } else {

    // further initializations for iterative steps
    VectorXi active;  // active predictors
  	int k = 0;        // number of active predictors
    // previous and current regression coefficients
  	VectorXd previousBeta = VectorXd::Zero(p+s), currentBeta = VectorXd::Zero(p+s);
  	// previous and current penalty parameter
    double previousLambda = 0, currentLambda = 0;
  	// indicates variables to be dropped
    VectorXi drops;
  	// keep track of sign of correlations for the active variables 
    // (double precision is necessary for solve())
    VectorXd signs;
  	// Cholesky L of Gram matrix of active variables
    MatrixXd L;
  	int rank = 0;		// rank of Cholesky L
    // maximum number of variables to be sequenced
  	int maxActive = findMaxActive(n, p, useIntercept);

  	// modified LARS algorithm for lasso solution
  	while(k < maxActive) {

  		// extract current correlations of inactive variables
  		VectorXd corInactiveY(m);
  		for(int j = 0; j < m; j++) {
  			corInactiveY(j) = corY(inactive(j));
  		}
  		// compute absolute values of correlations and find maximum
  		VectorXd absCorInactiveY = corInactiveY.cwiseAbs();
  		double maxCor = absCorInactiveY.maxCoeff();
  		// update current lambda
  		if(k == 0) {	// no active variables
  			previousLambda = maxCor;
  		} else {
  			previousLambda = currentLambda;
  		}
  		currentLambda = maxCor;
  		if(currentLambda <= rescaledLambda) break;

  		if(drops.size() == 0) {
  			// new active variables
  			VectorXi newActive = findNewActive(absCorInactiveY, maxCor - eps);
  			// do calculations for new active variables
  			for(int j = 0; j < newActive.size(); j++) {
  				// update Cholesky L of Gram matrix of active variables
  				// TODO: put this into void function
  				int newJ = inactive(newActive(j));
  				VectorXd xNewJ;
  				double newX;
  				if(useGram) {
  					newX = Gram(newJ, newJ);
  				} else {
  					xNewJ = xs.col(newJ);
  					newX = xNewJ.squaredNorm();
  				}
  				double normNewX = sqrt(newX);
  				if(k == 0) {	// no active variables, L is empty
  					L.resize(1,1);
  					L(0, 0) = normNewX;
  					rank = 1;
  				} else {
  					VectorXd oldX(k);
  					if(useGram) {
  						for(int j = 0; j < k; j++) {
  							oldX(j) = Gram(active(j), newJ);
  						}
  					} else {
  						for(int j = 0; j < k; j++) {
  							oldX(j) = xNewJ.dot(xs.col(active(j)));
  						}
  					}
  					VectorXd l = L.triangularView<Lower>().solve(oldX);
  					double lkk = newX - l.squaredNorm();
  					// check if L is machine singular
  					if(lkk > eps) {
  						// no singularity: update Cholesky L
  						lkk = sqrt(lkk);
  						rank++;
  						// add new row and column to Cholesky L
  						// this is a little trick: sometimes we need
  						// lower triangular matrix, sometimes upper
  						// hence we define quadratic matrix and use
  						// triangularView() to interpret matrix the
  						// correct way
  						L.conservativeResize(k+1, k+1);
  						for(int j = 0; j < k; j++) {
  							L(k, j) = l(j);
  							L(j, k) = l(j);
  						}
  						L(k,k) = lkk;
  					}
  				}
  				// add new variable to active set or drop it for good
  				// in case of singularity
  				if(rank == k) {
  					// singularity: drop variable for good
  					ignores.append(newJ, s);
  					s++;	// increase number of ignored variables
  					p--;	// decrease number of variables
  					if(p < maxActive) {
  						// adjust maximum number of active variables
  						maxActive = p;
  					}
  				} else {
  					// no singularity: add variable to active set
  					active.append(newJ, k);
  					// keep track of sign of correlation for new active variable
  					signs.append(sign(corY(newJ)), k);
  					k++;	// increase number of active variables
  				}
  			}
  			// remove new active or ignored variables from inactive variables
  			// and corresponding vector of current correlations
  			inactive.remove(newActive);
  			corInactiveY.remove(newActive);
  			m = inactive.size();	// update number of inactive variables
  		}
  		// prepare for computation of step size
  		// here double precision of signs is necessary
  		VectorXd b = L.triangularView<Lower>().solve(signs);
  		VectorXd G = L.triangularView<Upper>().solve(b);
  		// correlations of active variables with equiangular vector
  		double corActiveU = 1/sqrt(G.dot(signs));
  		// coefficients of active variables in linear combination forming the
  		// equiangular vector
  		VectorXd w = G * corActiveU;	// note that this has the right signs
  		// equiangular vector
  		VectorXd u;
  		if(!useGram) {
  			// we only need equiangular vector if we don't use the precomputed
  			// Gram matrix, otherwise we can compute the correlations directly
  			// from the Gram matrix
  			u = VectorXd::Zero(n);
  			for(int i = 0; i < n; i++) {
  				for(int j = 0; j < k; j++) {
  					u(i) += xs(i, active(j)) * w(j);
  				}
  			}
  		}
  		// compute step size in equiangular direction
  		double step;
  		if(k < maxActive) {
  			// correlations of inactive variables with equiangular vector
  			VectorXd corInactiveU(m);
  			if(useGram) {
  				for(int j = 0; j < m; j++) {
  					corInactiveU(j) = 0;
  					for(int i = 0; i < k; i++) {
  						corInactiveU(j) += w(i) * Gram(active(i), inactive(j));
  					}
  				}
  			} else {
  				for(int j = 0; j < m; j++) {
  					corInactiveU(j) = u.dot(xs.col(inactive(j)));
  				}
  			}
  			// compute step size in the direction of the equiangular vector
  			step = findStep(maxCor, corInactiveY, corActiveU, corInactiveU, eps);
  		} else {
  			// last step: take maximum possible step
  			step = maxCor/corActiveU;
  		}
  		// adjust step size if any sign changes and drop corresponding variables
  		drops = findDrops(currentBeta, active, w, eps, step);
  		// update current regression coefficients
  		previousBeta = currentBeta;
  		for(int j = 0; j < k; j++) {
  			currentBeta(active(j)) += step * w(j);
  		}
  		// update current correlations
  		if(useGram) {
  			// we also need to do this for active variables, since they may be
  			// dropped at a later stage
  			// TODO: computing a vector step * w in advance may save some computation time
  			for(int j = 0; j < Gram.cols(); j++) {
  				for(int i = 0; i < k; i++) {
  					corY(j) -= step * w(i) * Gram(active(i), j);
  				}
  			}
  		} else {
  			ys -= step * u;	// take step in equiangular direction
  			corY.noalias() = ys.transpose() * xs;	// update correlations
  		}
  		// drop variables if necessary
  		if(drops.size() > 0) {
  			// downdate Cholesky L
  			// TODO: put this into void function
  			for(int j = drops.size()-1; j >= 0; j--) {
  				// variables need to be dropped in descending order
  				int drop = drops(j);	// index with respect to active set
  				// modify the upper triangular part as in the R package 'lars';
  				// simply deleting columns is not enough, additional computations
  				// are needed, which are complicated by the original Fortran code
  				L.removeCol(drop);
  				VectorXd z = VectorXd::Constant(k, 1, 1);
  				k--;	// decrease number of active variables
  				for(int i = drop; i < k; i++) {
  					double a = L(i,i), b = L(i+1,i);
  					if(b != 0.0) {
  						// compute the rotation
  						double tau, s, c;
  						if(std::abs(b) > std::abs(a)) {
  							tau = -a/b;
  							s = 1.0/sqrt(1.0+tau*tau);
  							c = s * tau;
  						} else {
  							tau = -b/a;
  							c = 1.0/sqrt(1.0+tau*tau);
  							s = c * tau;
  						}
  						// update 'L' and 'z';
  						L(i,i) = c*a - s*b;
  						for(int j = i+1; j < k; j++) {
  							a = L(i,j);
  							b = L(i+1,j);
  							L(i,j) = c*a - s*b;
  							L(i+1,j) = s*a + c*b;
  						}
  						a = z(i);
  						b = z(i+1);
  						z(i) = c*a - s*b;
  						z(i+1) = s*a + c*b;
  					}
  				}
  				// TODO: removing all rows together may save some computation time
  				L.conservativeResize(k, NoChange);
  				rank--;
  			}
  			// mirror lower triangular part
  			for(int j = 0; j < k; j++) {
  				for(int i = j+1; i < k; i++) {
  					L(i,j) = L(j,i);
  				}
  			}
  			// add dropped variables to inactive set and make sure
  			// coefficients are 0
  			inactive.conservativeResize(m + drops.size());
  			for(int j = 0; j < drops.size(); j++) {
  				int newInactive = active(drops(j));
  				inactive(m + j) = newInactive;
  				currentBeta(newInactive) = 0;	// make sure coefficient is 0
  			}
  			m = inactive.size();	// update number of inactive variables
  			// drop variables from active set and sign vector
  			// number of active variables is already updated above
  			active.remove(drops);
  			signs.remove(drops);
  		}
  	}

  	// interpolate coefficients for given lambda
    if(k == 0) {
      // lambda larger than largest lambda from steps of LARS algorithm
  		beta.setZero();
    } else {
    	// penalty parameter within two steps
      if(k == maxActive) {
          // current coefficients are the least squares solution (in the 
          // high-dimensional case, as far along the solution path as possible)
          // current and previous values of the penalty parameter need to be 
          // reset for interpolation
          previousLambda = currentLambda;
          currentLambda = 0;
      }
      // interpolate coefficients
    	beta = ((rescaledLambda - currentLambda) * previousBeta +
  				(previousLambda - rescaledLambda) * currentBeta) /
  				(previousLambda - currentLambda);
    }
  }

	// transform coefficients back
  VectorXd normedBeta;
	if(normalize) {
    if(useCrit) normedBeta = beta;
    for(int j = 0; j < p; j++) beta(j) /= normX(j);
	}
	if(useIntercept) intercept = meanY - beta.dot(meanX);

  // compute residuals for all observations
  n = x.rows();
  residuals = y - x * beta;
  if(useIntercept) {
    for(int i = 0; i < n; i++) residuals(i) -= intercept;
  }

  // compute value of objective function on the subset
  if(useCrit && useSubset) {
    if(normalize) crit = objective(normedBeta, residuals, subset, lambda);
    else crit = objective(beta, residuals, subset, lambda);
  }
}
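For reference, reading the rescaling at the top of the function (rescaledLambda = n\lambda/2) together with the LARS stopping rule currentLambda <= rescaledLambda, i.e. \max_j |x_j^\top r| \le n\lambda/2, the returned coefficients solve the lasso problem

\min_{\beta} \; \tfrac{1}{2} \lVert y - X\beta \rVert_2^2 + \tfrac{n\lambda}{2} \lVert \beta \rVert_1 ,

which after dividing by n/2 is equivalent to \tfrac{1}{n} \lVert y - X\beta \rVert_2^2 + \lambda \lVert \beta \rVert_1. This is a reading of the code's rescaling, not a claim about the surrounding package's documented criterion.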