Example #1
0
void Reservoir::linear_activation(VectorXd &inputs, VectorXd &results) {
    // Linear (identity) activation: copy the input coefficients into results,
    // treating the input as a flat vector regardless of its orientation.
    unsigned int num_elems = (inputs.rows() > inputs.cols()) ? inputs.rows() : inputs.cols();
    results.resize(num_elems, 1);
    for (unsigned int i=0; i<num_elems; i++) {
        results(i) = inputs(i);
    }
}
 void MultivariateFNormalSufficientSparse::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       if (FM.rows() != M_) {
           IMP_THROW("size mismatch for FM: got "
                   <<FM.rows() << " instead of " << M_, ModelException);
           }
       FM_=FM;
       IMP_LOG(TERSE, "MVNsparse:   set FM to new vector" << std::endl);
       compute_epsilon();
   }
 }
 void MultivariateFNormalSufficientSparse::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       if (Fbar.rows() != M_) {
           IMP_THROW("size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_, ModelException);
           }
       Fbar_=Fbar;
       IMP_LOG(TERSE, "MVNsparse:   set Fbar to new vector" << std::endl);
       compute_epsilon();
   }
 }
 void MultivariateFNormalSufficient::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       CHECK(FM.rows() == M_,
           "size mismatch for FM: got "
           <<FM.rows() << " instead of " << M_);
       FM_=FM;
       LOG( "MVN:   set FM to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_Peps_ = false;
   }
   flag_FM_ = true;
 }
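All three setters above follow the same lazy-caching idiom: store the new vector only if it actually differs from the cached one, throw on a size mismatch, and either recompute the dependent quantity immediately (compute_epsilon) or mark it stale through flags. A minimal standalone sketch of that dirty-flag pattern, using hypothetical names (CachedModel, set_mean, epsilon) rather than the IMP API:

#include <Eigen/Dense>
#include <stdexcept>

// Sketch of the dirty-flag caching idiom; all names here are illustrative.
class CachedModel {
 public:
  explicit CachedModel(const Eigen::VectorXd& data)
      : data_(data), mean_(Eigen::VectorXd::Zero(data.rows())) {}

  void set_mean(const Eigen::VectorXd& mean) {
    if (mean.rows() != data_.rows())
      throw std::invalid_argument("size mismatch for mean");
    if (mean != mean_) {       // only invalidate when the value really changes
      mean_ = mean;
      epsilon_valid_ = false;  // dependent quantity is now stale
    }
  }

  // epsilon = data - mean, recomputed lazily on first use after a change.
  const Eigen::VectorXd& epsilon() {
    if (!epsilon_valid_) {
      epsilon_ = data_ - mean_;
      epsilon_valid_ = true;
    }
    return epsilon_;
  }

 private:
  Eigen::VectorXd data_, mean_, epsilon_;
  bool epsilon_valid_ = false;
};

Deferring the recomputation behind a flag, as the non-sparse class does, avoids recomputing epsilon several times when multiple parameters are updated back to back.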
Example #5
0
bool LocallyWeightedRegression::updateThetas(const VectorXd &deltaThetas)
{
    if (!initialized_)
    {
        printf("ERROR: LWR model not initialized.\n");
        return initialized_;
    }
    assert(deltaThetas.cols() == thetas_.cols());
    assert(deltaThetas.rows() == thetas_.rows());
    thetas_ += deltaThetas;

    return true;
}
 void MultivariateFNormalSufficient::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       if (FM.rows() != M_) {
           IMP_THROW("size mismatch for FM: got "
                   <<FM.rows() << " instead of " << M_, ModelException);
           }
       FM_=FM;
       IMP_LOG(TERSE, "MVN:   set FM to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_Peps_ = false;
   }
   flag_FM_ = true;
 }
Example #7
0
bool LocallyWeightedRegression::getThetas(VectorXd &thetas)
{
    if (!initialized_)
    {
        printf("ERROR: LWR model not initialized.\n");
        return initialized_;
    }

    assert(thetas.cols() == thetas_.cols());
    assert(thetas.rows() == thetas_.rows());

    thetas = thetas_;

    return true;
}
 void MultivariateFNormalSufficient::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       CHECK(Fbar.rows() == M_,
           "size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_);
       Fbar_=Fbar;
       LOG( "MVN:   set Fbar to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_W_ = false;
       flag_PW_ = false;
       flag_Peps_ = false;
   }
   flag_Fbar_ = true;
 }
Example #9
0
std::tuple<double, VectorXd, SparseMatrix>
kantorovich_2(const Density_2 &pl,
	      const MatrixXd &X,
	      const VectorXd &w)
{
  size_t N = X.rows();
  assert(X.cols() == 2);
  assert(w.cols() == 1);
  assert(w.rows() == N);

  VectorXd g(N);

  SparseMatrix h;
  double res = MA::kantorovich(pl._t, pl._functions, X, w, g, h);
  return std::make_tuple(res, g, h);
}
 void MultivariateFNormalSufficient::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       if (Fbar.rows() != M_) {
           IMP_THROW("size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_, ModelException);
           }
       Fbar_=Fbar;
       IMP_LOG(TERSE, "MVN:   set Fbar to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_W_ = false;
       flag_PW_ = false;
       flag_Peps_ = false;
   }
   flag_Fbar_ = true;
 }
Example #11
0
std::tuple<VectorXd, MatrixXd, MatrixXd>
moments_2(const Density_2 &pl,
	  const MatrixXd &X,
	  const VectorXd &w)
{
  size_t N = X.rows();
  assert(X.cols() == 2);
  assert(w.cols() == 1);
  assert(w.rows() == N);

  // create some room for return values: masses, centroids and inertia
  VectorXd m(N);
  MatrixXd c(N, 2);
  MatrixXd I(N, 3);

  MA::second_moment(pl._t, pl._functions, X, w, m, c, I);
  return std::make_tuple(m, c, I);
}
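kantorovich_2 and moments_2 both hand their results back through std::make_tuple; a caller would typically unpack them with std::tie. A usage sketch, assuming the same pl, X and w objects that the wrappers above expect (requires <tuple>):

// Hypothetical caller for the two wrappers above.
double cost;
VectorXd g, m;
SparseMatrix h;            // the sparse Hessian type returned by kantorovich_2
MatrixXd centroids, inertia;

std::tie(cost, g, h) = kantorovich_2(pl, X, w);        // value, gradient, Hessian
std::tie(m, centroids, inertia) = moments_2(pl, X, w); // masses, centroids, inertia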
Example #12
0
  Density_2(const MatrixXd& X,
            const VectorXd& f,
            const MatrixXi& tri) : gen(clock())
  {
    size_t N = X.rows();
    assert(X.cols() == 2);
    assert(f.cols() == 1);
    assert(f.rows() == N);
    assert(tri.cols() == 3);


    CGAL::Triangulation_incremental_builder_2<T> builder(_t);
    builder.begin_triangulation();

    // add vertices
    std::vector<T::Vertex_handle> vertices(N);
    for (size_t i = 0; i < N; ++i)
      {
	Point p(X(i,0),X(i,1));
	vertices[i] = builder.add_vertex(p);
	vertices[i]->info() = i;
      }

    // add faces
    size_t Nt = tri.rows();
    for (size_t i = 0; i < Nt; ++i)
      {
	int a = tri(i,0), b = tri(i,1), c = tri(i,2);
	builder.add_face(vertices[a], vertices[b], vertices[c]);
      }
    builder.end_triangulation();

    // compute functions
    for (T::Finite_faces_iterator it = _t.finite_faces_begin ();
	 it != _t.finite_faces_end(); ++it)
      {
	size_t a = it->vertex(0)->info();
	size_t b = it->vertex(1)->info();
	size_t c = it->vertex(2)->info();
	_functions[it] = Function(vertices[a]->point(), f[a],
				  vertices[b]->point(), f[b],
				  vertices[c]->point(), f[c]);
      }
  }
Example #13
0
void
python_to_delaunay_2(const MatrixXd& X,
                     const VectorXd& w,
		     RT &dt)
{
  size_t N = X.rows();
  assert(X.cols() == 2);
  assert(w.cols() == 1);
  assert(w.rows() == N);

  // insert points with indices in the regular triangulation
  std::vector<std::pair<Weighted_point,size_t> > Xw(N);
  for (size_t i = 0; i < N; ++i)
    {
      Xw[i] = std::make_pair(Weighted_point(Point(X(i,0), X(i,1)),
					    w(i)), i);
    }
  dt.clear();
  dt.insert(Xw.begin(), Xw.end());
  dt.infinite_vertex()->info() = -1;
}
Example #14
0
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, const mxArray *prhs[])
{
  // This is useful for debugging whether Matlab is caching the mex binary
  //mexPrintf("%s %s\n",__TIME__,__DATE__);
  igl::MexStream mout;
  std::streambuf *outbuf = std::cout.rdbuf(&mout);

  using namespace std;
  using namespace Eigen;
  using namespace igl;

  MatrixXd V,P,N;
  VectorXd S;
  MatrixXi F;
  int num_samples;
  parse_rhs(nrhs,prhs,V,F,P,N,num_samples);
  // Prepare left-hand side
  nlhs = 1;

  //read_triangle_mesh("../shared/cheburashka.off",V,F);
  //P = V;
  //per_vertex_normals(V,F,N);
  ambient_occlusion(V,F,P,N,num_samples,S);
  //MatlabWorkspace mw;
  //mw.save(V,"V");
  //mw.save(P,"P");
  //mw.save(N,"N");
  //mw.save_index(F,"F");
  //mw.save(S,"S");
  //mw.write("out.mat");

  plhs[0] = mxCreateDoubleMatrix(S.rows(),S.cols(), mxREAL);
  copy(S.data(),S.data()+S.size(),mxGetPr(plhs[0]));

  // Restore the std stream buffer (important!)
  std::cout.rdbuf(outbuf);
}
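Both mex gateways in this set redirect std::cout into MATLAB by swapping the stream buffer and restoring it before returning. The same pattern can be wrapped in a small RAII guard so the restore also happens on early returns; this is a sketch, not part of libigl:

#include <iostream>
#include <streambuf>

// RAII guard: swap std::cout's buffer for the lifetime of a scope and
// restore the original buffer automatically, even on early return or throw.
class ScopedCoutRedirect {
 public:
  explicit ScopedCoutRedirect(std::streambuf* target)
      : old_(std::cout.rdbuf(target)) {}
  ~ScopedCoutRedirect() { std::cout.rdbuf(old_); }
 private:
  std::streambuf* old_;
};

// Usage inside a gateway (mout is an igl::MexStream as in the function above):
//   igl::MexStream mout;
//   ScopedCoutRedirect redirect(&mout);
//   ... // std::cout output now reaches the MATLAB console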
Example #15
0
// template <typename tA, typename tB, typename tC, typename tD, typename tE,
// typename tF, typename tG>
// int fastQPThatTakesQinv(vector< MatrixBase<tA>* > QinvblkDiag, const
// MatrixBase<tB>& f, const MatrixBase<tC>& Aeq, const MatrixBase<tD>& beq,
// const MatrixBase<tE>& Ain, const MatrixBase<tF>& bin, set<int>& active,
// MatrixBase<tG>& x)
int fastQPThatTakesQinv(vector<MatrixXd*> QinvblkDiag, const VectorXd& f,
                        const MatrixXd& Aeq, const VectorXd& beq,
                        const MatrixXd& Ain, const VectorXd& bin,
                        set<int>& active, VectorXd& x) {
  int i, d;
  int iterCnt = 0;

  int M_in = bin.size();
  int M = Aeq.rows();
  int N = Aeq.cols();

  if (f.rows() != N) {
    cerr << "size of f (" << f.rows() << " by " << f.cols()
         << ") doesn't match cols of Aeq (" << Aeq.rows() << " by "
         << Aeq.cols() << ")" << endl;
    return 2;
  }
  if (beq.rows() != M) {
    cerr << "size of beq doesn't match rows of Aeq" << endl;
    return 2;
  }
  if (Ain.cols() != N) {
    cerr << "cols of Ain doesn't match cols of Aeq" << endl;
    return 2;
  };
  if (bin.rows() != Ain.rows()) {
    cerr << "bin rows doesn't match Ain rows" << endl;
    return 2;
  };
  if (x.rows() != N) {
    cerr << "x doesn't match Aeq" << endl;
    return 2;
  }
  int n_active = active.size();

  MatrixXd Aact = MatrixXd(n_active, N);
  VectorXd bact = VectorXd(n_active);

  MatrixXd QinvAteq(N, M);
  VectorXd minusQinvf(N);

  // calculate a bunch of stuff that is constant during each iteration
  int startrow = 0;
  //  for (typename vector< MatrixBase<tA>* >::iterator
  //  iterQinv=QinvblkDiag.begin(); iterQinv!=QinvblkDiag.end(); iterQinv++) {
  //  	MatrixBase<tA> *thisQinv = *iterQinv;
  for (vector<MatrixXd*>::iterator iterQinv = QinvblkDiag.begin();
       iterQinv != QinvblkDiag.end(); iterQinv++) {
    MatrixXd* thisQinv = *iterQinv;
    int numRow = thisQinv->rows();
    int numCol = thisQinv->cols();

    if (numRow == 1 || numCol == 1) {  // it's a vector
      d = numRow * numCol;
      if (M > 0)
        QinvAteq.block(startrow, 0, d, M) =
            thisQinv->asDiagonal() *
            Aeq.block(0, startrow, M, d)
                .transpose();  // Aeq.transpose().block(startrow, 0, d, N)
      minusQinvf.segment(startrow, d) =
          -thisQinv->cwiseProduct(f.segment(startrow, d));
      startrow = startrow + d;
    } else {  // potentially dense matrix
      d = numRow;
      if (numRow != numCol) {
        cerr << "Q is not square! " << numRow << "x" << numCol << "\n";
        return -2;
      }
      if (M > 0)
        QinvAteq.block(startrow, 0, d, M) = thisQinv->operator*(
            Aeq.block(0, startrow, M, d)
                .transpose());  // Aeq.transpose().block(startrow, 0, d, N)
      minusQinvf.segment(startrow, d) =
          -thisQinv->operator*(f.segment(startrow, d));
      startrow = startrow + d;
    }
    if (startrow > N) {
      cerr << "Q is too big!" << endl;
      return -2;
    }
  }
  if (startrow != N) {
    cerr << "Q is the wrong size.  Got " << startrow << "by" << startrow
         << " but needed " << N << "by" << N << endl;
    return -2;
  }

  MatrixXd A;
  VectorXd b;
  MatrixXd QinvAt;
  VectorXd lam, lamIneq;
  VectorXd violated(M_in);
  VectorXd violation;

  while (1) {
    iterCnt++;

    n_active = active.size();
    Aact.resize(n_active, N);
    bact.resize(n_active);

    i = 0;
    for (set<int>::iterator iter = active.begin(); iter != active.end();
         iter++) {
      if (*iter < 0 || *iter >= Ain.rows()) {
        return -3;  // active set is invalid.  exit quietly, because this is
                    // expected behavior in normal operation (e.g. it means I
                    // should immediately kick out to gurobi)
      }
      Aact.row(i) = Ain.row(*iter);
      bact(i++) = bin(*iter);
    }

    A.resize(Aeq.rows() + Aact.rows(), N);
    b.resize(beq.size() + bact.size());
    A << Aeq, Aact;
    b << beq, bact;

    if (A.rows() > 0) {
      // Solve H * [x;lam] = [-f;b] using Schur complements, H = [Q, At';A, 0];
      QinvAt.resize(QinvAteq.rows(), QinvAteq.cols() + Aact.rows());

      if (n_active > 0) {
        int startrow = 0;
        for (vector<MatrixXd*>::iterator iterQinv = QinvblkDiag.begin();
             iterQinv != QinvblkDiag.end(); iterQinv++) {
          MatrixXd* thisQinv = (*iterQinv);
          d = thisQinv->rows();
          int numCol = thisQinv->cols();

          if (numCol == 1) {  // it's a vector
            QinvAt.block(startrow, 0, d, M + n_active)
                << QinvAteq.block(startrow, 0, d, M),
                thisQinv->asDiagonal() *
                    Aact.block(0, startrow, n_active, d).transpose();
          } else {  // it's a matrix
            QinvAt.block(startrow, 0, d, M + n_active)
                << QinvAteq.block(startrow, 0, d, M),
                thisQinv->operator*(
                    Aact.block(0, startrow, n_active, d).transpose());
          }

          startrow = startrow + d;
        }
      } else {
        QinvAt = QinvAteq;
      }

      lam.resize(QinvAt.cols());
      lam =
          -(A * QinvAt).ldlt().solve(b + (f.transpose() * QinvAt).transpose());
      x = minusQinvf - QinvAt * lam;
      lamIneq = lam.tail(lam.size() - M);
    } else {
      x = minusQinvf;
      lamIneq.resize(0);
    }

    if (Ain.rows() == 0) {
      active.clear();
      break;
    }

    set<int> new_active;

    violation = Ain * x - bin;
    for (i = 0; i < M_in; i++)
      if (violation(i) >= 1e-6) new_active.insert(i);

    bool all_pos_mults = true;
    for (i = 0; i < n_active; i++) {
      if (lamIneq(i) < 0) {
        all_pos_mults = false;
        break;
      }
    }
    if (new_active.empty() && all_pos_mults) {
      // existing active was AOK
      break;
    }

    i = 0;
    set<int>::iterator iter = active.begin(), tmp;
    while (iter != active.end()) {  // to accommodate in-loop erase
      tmp = iter++;
      if (lamIneq(i++) < 0) {
        active.erase(tmp);
      }
    }
    active.insert(new_active.begin(), new_active.end());

    if (iterCnt > MAX_ITER) {
      // Default to calling this method
      //      cout << "FastQP max iter reached." << endl;
      //       mexErrMsgIdAndTxt("Drake:approximateIKmex:Error", "Max iter
      //       reached. Problem is likely infeasible");
      return -1;
    }
  }
  return iterCnt;
}
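For reference, the Schur-complement step that the loop above assembles blockwise can be written densely in a few lines. This is a simplified sketch for a single dense Qinv, without the block-diagonal handling or the active-set bookkeeping; the names are illustrative, not Drake's API:

#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

// Simplified dense version of the KKT solve used above:
//   H * [x; lam] = [-f; b]  with  H = [Q, A'; A, 0]
// Eliminating x via the Schur complement of Q gives
//   (A * Qinv * A') * lam = -(b + A * Qinv * f),   x = -Qinv * (f + A' * lam).
void solve_eq_qp(const MatrixXd& Qinv, const VectorXd& f,
                 const MatrixXd& A, const VectorXd& b,
                 VectorXd& x, VectorXd& lam) {
  const MatrixXd QinvAt = Qinv * A.transpose();
  lam = -(A * QinvAt).ldlt().solve(b + (f.transpose() * QinvAt).transpose());
  x = -Qinv * f - QinvAt * lam;
}

The blockwise loop in the function above exists so that diagonal and small dense blocks of Qinv never have to be assembled into one large matrix before this solve.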
void mexFunction(int nlhs, mxArray *plhs[], 
    int nrhs, const mxArray *prhs[])
{
  // This is useful for debugging whether Matlab is caching the mex binary
  //mexPrintf("%s %s\n",__TIME__,__DATE__);
  igl::matlab::MexStream mout;
  std::streambuf *outbuf = std::cout.rdbuf(&mout);

  using namespace std;
  using namespace Eigen;
  using namespace igl;
  using namespace igl::matlab;

  MatrixXd P,V,C;
  VectorXi I;
  VectorXd sqrD;
  MatrixXi F;
  if(nrhs < 3)
  {
    mexErrMsgTxt("nrhs < 3");
  }
  parse_rhs_double(prhs,P);
  parse_rhs_double(prhs+1,V);
  parse_rhs_index(prhs+2,F);
  mexErrMsgTxt(P.cols()==3 || P.cols()==2,"P must be #P by (3|2)");
  mexErrMsgTxt(V.cols()==3 || V.cols()==2,"V must be #V by (3|2)");
  mexErrMsgTxt(V.cols()==P.cols(),"dim(V) must be dim(P)");
  mexErrMsgTxt(F.cols()==3 || F.cols()==2 || F.cols()==1,"F must be #F by (3|2|1)");

  point_mesh_squared_distance(P,V,F,sqrD,I,C);
  // Prepare left-hand side
  switch(nlhs)
  {
    case 3:
    {
      // Copy closest points C as doubles
      plhs[2] = mxCreateDoubleMatrix(C.rows(),C.cols(), mxREAL);
      double * Cp = mxGetPr(plhs[2]);
      copy(&C.data()[0],&C.data()[0]+C.size(),Cp);
      // Fallthrough
    }
    case 2:
    {
      // Treat indices as reals
      plhs[1] = mxCreateDoubleMatrix(I.rows(),I.cols(), mxREAL);
      double * Ip = mxGetPr(plhs[1]);
      VectorXd Id = (I.cast<double>().array()+1).matrix();
      copy(&Id.data()[0],&Id.data()[0]+Id.size(),Ip);
      // Fallthrough
    }
    case 1:
    {
      plhs[0] = mxCreateDoubleMatrix(sqrD.rows(),sqrD.cols(), mxREAL);
      double * sqrDp = mxGetPr(plhs[0]);
      copy(&sqrD.data()[0],&sqrD.data()[0]+sqrD.size(),sqrDp);
      break;
    }
    default:break;
  }

  // Restore the std stream buffer (important!)
  std::cout.rdbuf(outbuf);
}
Example #17
0
//#define IGL_LINPROG_VERBOSE
IGL_INLINE bool igl::linprog(
  const Eigen::VectorXd & c,
  const Eigen::MatrixXd & _A,
  const Eigen::VectorXd & b,
  const int k,
  Eigen::VectorXd & x)
{
  // This is a very literal translation of
  // http://www.mathworks.com/matlabcentral/fileexchange/2166-introduction-to-linear-algebra/content/strang/linprog.m
  using namespace Eigen;
  using namespace std;
  bool success = true;
  // number of constraints
  const int m = _A.rows();
  // number of original variables
  const int n = _A.cols();
  // number of iterations
  int it = 0;
  // maximum number of iterations
  //const int MAXIT = 10*m;
  const int MAXIT = 100*m;
  // residual tolerance
  const double tol = 1e-10;
  const auto & sign = [](const Eigen::VectorXd & B) -> Eigen::VectorXd
  {
    Eigen::VectorXd Bsign(B.size());
    for(int i = 0;i<B.size();i++)
    {
      Bsign(i) = B(i)>0?1:(B(i)<0?-1:0);
    }
    return Bsign;
  };
  // initial (inverse) basis matrix
  VectorXd Dv = sign(sign(b).array()+0.5);
  Dv.head(k).setConstant(1.);
  MatrixXd D = Dv.asDiagonal();
  // Incorporate slack variables
  MatrixXd A(_A.rows(),_A.cols()+D.cols());
  A<<_A,D;
  // Initial basis
  VectorXi B = igl::colon<int>(n,n+m-1);
  // non-basis, may turn out that vector<> would be better here
  VectorXi N = igl::colon<int>(0,n-1);
  int j;
  double bmin = b.minCoeff(&j);
  int phase;
  VectorXd xb;
  VectorXd s;
  VectorXi J;
  if(k>0 && bmin<0)
  {
    phase = 1;
    xb = VectorXd::Ones(m);
    // super cost
    s.resize(n+m+1);
    s<<VectorXd::Zero(n+k),VectorXd::Ones(m-k+1);
    N.resize(n+1);
    N<<igl::colon<int>(0,n-1),B(j);
    J.resize(B.size()-1);
    // [0 1 2 3 4]
    //      ^
    // [0 1]
    //      [3 4]
    J.head(j) = B.head(j);
    J.tail(B.size()-j-1) = B.tail(B.size()-j-1);
    B(j) = n+m;
    MatrixXd AJ;
    igl::slice(A,J,2,AJ);
    const VectorXd a = b - AJ.rowwise().sum();
    {
      MatrixXd old_A = A;
      A.resize(A.rows(),A.cols()+a.cols());
      A<<old_A,a;
    }
    D.col(j) = -a/a(j);
    D(j,j) = 1./a(j);
  }else if(k==m)
  {
    phase = 2;
    xb = b;
    s.resize(c.size()+m);
    // cost function
    s<<c,VectorXd::Zero(m);
  }else //k = 0 or bmin >=0
  {
    phase = 1;
    xb = b.array().abs();
    s.resize(n+m);
    // super cost
    s<<VectorXd::Zero(n+k),VectorXd::Ones(m-k);
  }
  while(phase<3)
  {
    double df = -1;
    int t = std::numeric_limits<int>::max();
    // Lagrange multipliers for Ax=b
    VectorXd yb = D.transpose() * igl::slice(s,B);
    while(true)
    {
      if(MAXIT>0 && it>=MAXIT)
      {
#ifdef IGL_LINPROG_VERBOSE
        cerr<<"linprog: warning! maximum iterations without convergence."<<endl;
#endif
        success = false;
        break;
      }
      // no freedom for minimization
      if(N.size() == 0)
      {
        break;
      }
      // reduced costs
      VectorXd sN = igl::slice(s,N);
      MatrixXd AN = igl::slice(A,N,2);
      VectorXd r = sN - AN.transpose() * yb;
      int q;
      // determine new basic variable
      double rmin = r.minCoeff(&q);
      // optimal! infinity norm
      if(rmin>=-tol*(sN.array().abs().maxCoeff()+1))
      {
        break;
      }
      // increment iteration count
      it++;
      // apply Bland's rule to avoid cycling
      if(df>=0)
      {
        if(MAXIT == -1)
        {
#ifdef IGL_LINPROG_VERBOSE
          cerr<<"linprog: warning! degenerate vertex"<<endl;
#endif
          success = false;
        }
        igl::find((r.array()<0).eval(),J);
        double Nq = igl::slice(N,J).minCoeff();
        // again seems like q is assumed to be a scalar though matlab code
        // could produce a vector for multiple matches
        (N.array()==Nq).cast<int>().maxCoeff(&q);
      }
      VectorXd d = D*A.col(N(q));
      VectorXi I;
      igl::find((d.array()>tol).eval(),I);
      if(I.size() == 0)
      {
#ifdef IGL_LINPROG_VERBOSE
        cerr<<"linprog: warning! solution is unbounded"<<endl;
#endif
        // This seems dubious:
        it=-it;
        success = false;
        break;
      }
      VectorXd xbd = igl::slice(xb,I).array()/igl::slice(d,I).array();
      // new use of r
      int p;
      {
        double r;
        r = xbd.minCoeff(&p);
        p = I(p);
        // apply Bland's rule to avoid cycling
        if(df>=0)
        {
          igl::find((xbd.array()==r).eval(),J);
          double Bp = igl::slice(B,igl::slice(I,J)).minCoeff();
          // idiotic way of finding index in B of Bp
          // code down the line seems to assume p is a scalar though the matlab
          // code could find a vector of matches)
          (B.array()==Bp).cast<int>().maxCoeff(&p);
        }
        // update x
        xb -= r*d;
        xb(p) = r;
        // change in f
        df = r*rmin;
      }
      // row vector
      RowVectorXd v = D.row(p)/d(p);
      yb += v.transpose() * (s(N(q)) - d.transpose()*igl::slice(s,B));
      d(p)-=1;
      // update inverse basis matrix
      D = D - d*v;
      t = B(p);
      B(p) = N(q);
      if(t>(n+k-1))
      {
        // remove qth entry from N
        VectorXi old_N = N;
        N.resize(N.size()-1);
        N.head(q) = old_N.head(q);
        N.tail(old_N.size()-q-1) = old_N.tail(old_N.size()-q-1);
      }else
      {
        N(q) = t;
      }
    }
    // iterative refinement
    xb = (xb+D*(b-igl::slice(A,B,2)*xb)).eval();
    // must be due to rounding
    VectorXi I;
    igl::find((xb.array()<0).eval(),I);
    if(I.size()>0)
    {
      // so correct
      VectorXd Z = VectorXd::Zero(I.size(),1);
      igl::slice_into(Z,I,xb);
    }
    // B, xb,n,m,res=A(:,B)*xb-b
    if(phase == 2 || it<0)
    {
      break;
    }
    if(xb.transpose()*igl::slice(s,B) > tol)
    {
      it = -it;
#ifdef IGL_LINPROG_VERBOSE
      cerr<<"linprog: warning, no feasible solution"<<endl;
#endif
      success = false;
      break;
    }
    // re-initialize for Phase 2
    phase = phase+1;
    s*=1e6*c.array().abs().maxCoeff();
    s.head(n) = c;
  }
  x.resize(std::max(B.maxCoeff()+1,n));
  igl::slice_into(xb,B,x);
  x = x.head(n).eval();
  return success;
}
//TBD: J -> adfda, e should take data as input too.
//PatchFit::lm(MatrixXd (*J)(VectorXd) ,VectorXd (*e)(VectorXd), VectorXd a_Init)
VectorXd
PatchFit::lm(VectorXd a_Init)
{
  // Options
  bool normalize (false);
  double l_Init = 0.001;
  double nu = 10;

  VectorXd a (a_Init.rows(), a_Init.cols());
  a = a_Init;
  
  double l;
  l = l_Init;

  int i=0; //iteration
  //bool cc = false; // whether the last iteration committed a change to a

  double r; //residual
  
  //calculate initial error and residual
  if (normalize)
  {
    a = a/a.norm();
    //cc = true;
  }

  VectorXd ee;
  ee = PatchFit::parab(cloud_vec,a);
  r = ee.norm();
  //double r_Init = r;

  double dr = 0.0;
  MatrixXd jj;
  
  double lastr;

  // define matrices
  MatrixXd jtj, jtj_diag, aa;
  VectorXd b, da, lasta, lastee;
  while (1)
  {
    // terminating due to 0 residual at iteration i
    if (fabs(r) < EPS)
      break;

    // terminating due to minad at iteration i
    //TBD

    // terminating due to minrd at iteration i
    if ((i>0) && (this->minrd_>0.0) && (dr<0.0) && (fabs(dr)<this->minrd_*lastr))
      break;

    // terminating due to minar at iteration i
    //TBD

    // terminating due to minrr at iteration i
    //TBD
   
    // terminating due to minada at iteration i
    //TBD

    // terminating due to minrda at iteration i
    //TBD

    // terminating due to max iterations
    if (i==this->maxi_)
      break;

    i = i+1;

    // update Jacobian if necessary
    if ((i==1)||(dr<0.0))
      jj = PatchFit::parab_dfda(a);

    // calculate da
    jtj = jj.adjoint()*jj;
    jtj_diag = jtj.diagonal().asDiagonal();
    aa = jtj + (l*jtj_diag);
    b = -jj.adjoint()*ee;

    // solve LLS system
    da = lls(aa,b);
    
    // update a (will back out the update if residual increased)
    lasta = a;
    a = a+da;

    if (normalize)
      a = a/a.norm();
    
    // update error and residual
    lastr = r;
    lastee = ee;
    ee = PatchFit::parab(cloud_vec,a);
    r = ee.norm();
    dr = r-lastr;

    if (dr<0) // converging
    {
      l = l/nu;
      //cc = true;
    }
    else // diverging
    {
      l = l*nu;
      a = lasta;
      ee = lastee;
      r = lastr;
      //cc = false;
    }
  }

  if (normalize && (i==0))
    a = a/a.norm();

  return (a);
}
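The heart of the loop above is the damped normal-equations solve (aa = jtj + l*jtj_diag, b = -jj'*ee). One such Levenberg-Marquardt step, stripped of the termination logic and using Eigen's LDLT in place of the lls() helper, could look like this (illustrative only, not the PatchFit API):

#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

// One Levenberg-Marquardt step for residuals e(a) with Jacobian J(a):
//   (J'J + lambda * diag(J'J)) * da = -J' * e
// As in the loop above, the caller divides lambda by nu and keeps the step
// when the residual drops, or multiplies lambda by nu and rejects it otherwise.
VectorXd lm_step(const MatrixXd& J, const VectorXd& e, double lambda) {
  const MatrixXd JtJ = J.adjoint() * J;
  MatrixXd A = JtJ;
  A.diagonal() += lambda * JtJ.diagonal();  // same as jtj + l*jtj_diag above
  const VectorXd b = -J.adjoint() * e;
  return A.ldlt().solve(b);                 // da
}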