float mmf::OptSO3ApproxGD::conjugateGradientCUDA_impl(Matrix3f& R, float res0,
    uint32_t n, uint32_t maxIter) {
  theta_ = SO3f(R);
  SO3f thetaPrev(R);
  Eigen::Vector3f J = Eigen::Vector3f::Zero();
  float fPrev = 1e12;
  float f = res0;
  uint32_t it = 0;
  // Iterate until the relative decrease of f falls below thr_.
  while ((fPrev - f) / fabs(f) > thr_ && it < maxIter) {
    fPrev = f;
    // The line search returns the step J in the tangent space and the new cost f.
    LineSearch(&J, &f);
    thetaPrev = theta_;
    theta_ += J;
    std::cout << "@" << it << " f=" << f << " df/f=" <<
      (fPrev-f)/fabs(f) << std::endl;
    ++it;
  }
  // Roll back the last step if it increased the cost.
  if (f > fPrev) {
    theta_ = thetaPrev;
  }
  R = theta_.matrix();

  if (f > fPrev) return fPrev;
  return f;
}
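
The LineSearch call above is opaque here; as a point of reference, a minimal backtracking (Armijo) line search for this kind of gradient step could look like the sketch below. The objective functor, the constants, and the ArmijoStep name are illustrative assumptions, not the mmf implementation.

#include <Eigen/Dense>
#include <functional>

// Backtracking line search: halve alpha until the Armijo condition
// f(x + alpha*d) <= f(x) + c*alpha*g.dot(d) holds, with d = -g.
// Returns the accepted step alpha*d and writes the new cost to *fOut.
Eigen::Vector3f ArmijoStep(
    const std::function<float(const Eigen::Vector3f&)>& f,
    const Eigen::Vector3f& x, const Eigen::Vector3f& g,
    float* fOut, float c = 1e-4f, float alpha = 1.f, int maxHalvings = 30) {
  const Eigen::Vector3f d = -g;  // steepest-descent direction
  const float fx = f(x);
  for (int i = 0; i < maxHalvings; ++i, alpha *= 0.5f) {
    const float fTrial = f(x + alpha * d);
    if (fTrial <= fx + c * alpha * g.dot(d)) {
      *fOut = fTrial;
      return alpha * d;
    }
  }
  *fOut = fx;
  return Eigen::Vector3f::Zero();  // no acceptable step found
}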
Example #2
double L_BFGS<FunctionType>::Optimize(arma::mat& iterate,
                                      const size_t maxIterations)
{
  // Ensure that the cubes holding past iterations' information are the right
  // size.  Also set the current best point value to the maximum.
  const size_t rows = function.GetInitialPoint().n_rows;
  const size_t cols = function.GetInitialPoint().n_cols;

  s.set_size(rows, cols, numBasis);
  y.set_size(rows, cols, numBasis);
  minPointIterate.second = std::numeric_limits<double>::max();

  // The old iterate to be saved.
  arma::mat oldIterate;
  oldIterate.zeros(iterate.n_rows, iterate.n_cols);

  // Whether to optimize until convergence.
  bool optimizeUntilConvergence = (maxIterations == 0);

  // The initial function value.
  double functionValue = Evaluate(iterate);

  // The gradient: the current and the old.
  arma::mat gradient;
  arma::mat oldGradient;
  gradient.zeros(iterate.n_rows, iterate.n_cols);
  oldGradient.zeros(iterate.n_rows, iterate.n_cols);

  // The search direction.
  arma::mat searchDirection;
  searchDirection.zeros(iterate.n_rows, iterate.n_cols);

  // The initial gradient value.
  function.Gradient(iterate, gradient);

  // The main optimization loop.
  for (size_t itNum = 0; optimizeUntilConvergence || (itNum != maxIterations);
       ++itNum)
  {
    Rcpp::Rcout << "L-BFGS iteration " << itNum << "; objective " <<
        function.Evaluate(iterate) << "." << std::endl;

    // Break when the norm of the gradient becomes too small.
    if (GradientNormTooSmall(gradient))
    {
      Rcpp::Rcout << "L-BFGS gradient norm too small (terminating successfully)."
          << std::endl;
      break;
    }

    // Choose the scaling factor.
    double scalingFactor = ChooseScalingFactor(itNum, gradient);

    // Build an approximation to the Hessian and choose the search
    // direction for the current iteration.
    SearchDirection(gradient, itNum, scalingFactor, searchDirection);

    // Save the old iterate and the gradient before stepping.
    oldIterate = iterate;
    oldGradient = gradient;

    // Do a line search and take a step.
    if (!LineSearch(functionValue, iterate, gradient, searchDirection))
    {
      Rcpp::Rcout << "Line search failed.  Stopping optimization." << std::endl;
      break; // The line search failed; nothing else to try.
    }

    // It is possible that the difference between the two coordinates is zero.
    // In this case we terminate successfully.
    if (accu(iterate != oldIterate) == 0)
    {
      Rcpp::Rcout << "L-BFGS step size of 0 (terminating successfully)."
          << std::endl;
      break;
    }

    // Overwrite an old basis set.
    UpdateBasisSet(itNum, iterate, oldIterate, gradient, oldGradient);

  } // End of the optimization loop.

  return function.Evaluate(iterate);
}
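
Both L-BFGS variants in this listing terminate on GradientNormTooSmall, whose definition is not shown; it presumably reduces to a norm threshold. A minimal sketch, with minGradientNorm as a hypothetical stand-in for the optimizer's actual member:

#include <armadillo>

// Stop when the L2 norm of the (vectorised) gradient falls below a fixed
// threshold; minGradientNorm is a hypothetical default, not the library's.
bool GradientNormTooSmall(const arma::mat& gradient,
                          const double minGradientNorm = 1e-10)
{
  return arma::norm(arma::vectorise(gradient), 2) < minGradientNorm;
}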
Example #3
double L_BFGS<FunctionType>::Optimize(arma::mat& iterate,
                                      const size_t maxIterations)
{
  // Ensure that the cubes holding past iterations' information are the right
  // size.  Also set the current best point value to the maximum.
  const size_t rows = function.GetInitialPoint().n_rows;
  const size_t cols = function.GetInitialPoint().n_cols;

  s.set_size(rows, cols, numBasis);
  y.set_size(rows, cols, numBasis);
  minPointIterate.second = std::numeric_limits<double>::max();

  // The old iterate to be saved.
  arma::mat oldIterate;
  oldIterate.zeros(iterate.n_rows, iterate.n_cols);

  // Whether to optimize until convergence.
  bool optimizeUntilConvergence = (maxIterations == 0);

  // The initial function value.
  double functionValue = Evaluate(iterate);
  double prevFunctionValue = functionValue;

  // The gradient: the current and the old.
  arma::mat gradient;
  arma::mat oldGradient;
  gradient.zeros(iterate.n_rows, iterate.n_cols);
  oldGradient.zeros(iterate.n_rows, iterate.n_cols);

  // The search direction.
  arma::mat searchDirection;
  searchDirection.zeros(iterate.n_rows, iterate.n_cols);

  // The initial gradient value.
  function.Gradient(iterate, gradient);

  // The main optimization loop.
  for (size_t itNum = 0; optimizeUntilConvergence || (itNum != maxIterations);
       ++itNum)
  {
    Log::Debug << "L-BFGS iteration " << itNum << "; objective " <<
        function.Evaluate(iterate) << ", gradient norm " <<
        arma::norm(gradient, 2) << ", relative change " <<
        ((prevFunctionValue - functionValue) /
         std::max(std::max(fabs(prevFunctionValue), fabs(functionValue)),
                  1.0)) << "." << std::endl;

    prevFunctionValue = functionValue;

    // Break when the norm of the gradient becomes too small.
    //
    // But don't do this on the first iteration to ensure we always take at
    // least one descent step.
    if (itNum > 0 && GradientNormTooSmall(gradient))
    {
      Log::Debug << "L-BFGS gradient norm too small (terminating successfully)."
          << std::endl;
      break;
    }

    // Break if the objective is not a number.
    if (std::isnan(functionValue))
    {
      Log::Warn << "L-BFGS terminated with objective " << functionValue << "; "
          << "are the objective and gradient functions implemented correctly?"
          << std::endl;
      break;
    }

    // Choose the scaling factor.
    double scalingFactor = ChooseScalingFactor(itNum, gradient);

    // Build an approximation to the Hessian and choose the search
    // direction for the current iteration.
    SearchDirection(gradient, itNum, scalingFactor, searchDirection);

    // Save the old iterate and the gradient before stepping.
    oldIterate = iterate;
    oldGradient = gradient;

    // Do a line search and take a step.
    if (!LineSearch(functionValue, iterate, gradient, searchDirection))
    {
      Log::Debug << "Line search failed.  Stopping optimization." << std::endl;
      break; // The line search failed; nothing else to try.
    }

    // It is possible that the difference between the two coordinates is zero.
    // In this case we terminate successfully.
    if (accu(iterate != oldIterate) == 0)
    {
      Log::Debug << "L-BFGS step size of 0 (terminating successfully)."
          << std::endl;
      break;
    }

    // If we can't make progress on the gradient, then we'll also accept
    // a stable function value.
    const double denom =
      std::max(
        std::max(fabs(prevFunctionValue), fabs(functionValue)),
        1.0);
    if ((prevFunctionValue - functionValue) / denom <= factr)
    {
      Log::Debug << "L-BFGS function value stable (terminating successfully)."
          << std::endl;
      break;
    }

    // Overwrite an old basis set.
    UpdateBasisSet(itNum, iterate, oldIterate, gradient, oldGradient);

  } // End of the optimization loop.

  return function.Evaluate(iterate);
}
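
The interesting work hides in SearchDirection, which in standard L-BFGS is the two-loop recursion over the stored (s, y) pairs. A minimal sketch, assuming the pairs are kept in std::vectors (oldest first) rather than the cubes used above:

#include <armadillo>
#include <vector>

// Two-loop recursion: computes d = -H*g, where H is the implicit L-BFGS
// inverse-Hessian approximation built from the pairs (s[i], y[i]).
// 'scaling' plays the role of ChooseScalingFactor above.
arma::mat TwoLoopDirection(const arma::mat& gradient,
                           const std::vector<arma::mat>& s,
                           const std::vector<arma::mat>& y,
                           double scaling)
{
  const size_t m = s.size();
  std::vector<double> alpha(m), rho(m);
  arma::mat q = gradient;
  for (size_t i = m; i-- > 0; )  // newest pair to oldest
  {
    rho[i] = 1.0 / arma::accu(y[i] % s[i]);
    alpha[i] = rho[i] * arma::accu(s[i] % q);
    q -= alpha[i] * y[i];
  }
  q *= scaling;  // initial inverse-Hessian guess H0 = scaling * I
  for (size_t i = 0; i < m; ++i)  // oldest pair to newest
  {
    const double beta = rho[i] * arma::accu(y[i] % q);
    q += (alpha[i] - beta) * s[i];
  }
  return -q;  // descent direction
}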
Example #4
/*! \fn solve system with Newton-Raphson
 *
 *  \param [in]  [n] size of equation
 *               [eps] tolerance for x
 *               [h] tolerance for f'
 *               [k] maximum number of iterations
 *               [work] work array of size (n*X)
 *               [f] user-provided function
 *               [data] userdata
 *               [info]
 *               [calculate_jacobian] flag which decides whether the Jacobian is calculated:
 *                 (0)  only once, for the first calculation
 *                 (i)  every i steps (=1 means the original Newton method)
 *                 (-1) never; the factorization has to be given in A
 */
int _omc_newton(int(*f)(int*, double*, double*, void*, int), DATA_NEWTON* solverData, void* userdata)
{

  int i, j, k = 0, l = 0, nrsh = 1;
  int *n = &(solverData->n);
  double *x = solverData->x;
  double *fvec = solverData->fvec;
  double *eps = &(solverData->ftol);
  double *fdeps = &(solverData->epsfcn);
  int * maxfev = &(solverData->maxfev);
  double *fjac = solverData->fjac;
  double *work = solverData->rwork;
  int *iwork = solverData->iwork;
  int *info = &(solverData->info);
  int calc_jac = 1;

  double error_f = 1.0 + *eps, scaledError_f = 1.0 + *eps;
  double delta_x = 1.0 + *eps, delta_f = 1.0 + *eps, delta_x_scaled = 1.0 + *eps;
  double lambda = 1.0;
  double current_fvec_enorm;

  if(ACTIVE_STREAM(LOG_NLS_V))
  {
    infoStreamPrint(LOG_NLS_V, 1, "######### Start Newton maxfev: %d #########", (int)*maxfev);

    infoStreamPrint(LOG_NLS_V, 1, "x vector");
    for(i=0; i<*n; i++)
      infoStreamPrint(LOG_NLS_V, 0, "x[%d]: %e ", i, x[i]);
    messageClose(LOG_NLS_V);

    messageClose(LOG_NLS_V);
  }

  *info = 1;

  /* calculate the function values */
  (*f)(n, x, fvec, userdata, 1);

  solverData->nfev++;

  /* save current fvec in f_old*/
  memcpy(solverData->f_old, fvec, *n*sizeof(double));

  error_f = current_fvec_enorm = enorm_(n, fvec);

  while(error_f > *eps && scaledError_f > *eps  &&  delta_x > *eps  &&  delta_f > *eps  && delta_x_scaled > *eps)
  {
    if(ACTIVE_STREAM(LOG_NLS_V))
    {
      infoStreamPrint(LOG_NLS_V, 0, "\n**** start Iteration: %d  *****", (int) l);

      /*  Debug output */
      infoStreamPrint(LOG_NLS_V, 1, "function values");
      for(i=0; i<*n; i++)
        infoStreamPrint(LOG_NLS_V, 0, "fvec[%d]: %e ", i, fvec[i]);
      messageClose(LOG_NLS_V);
    }

    /* calculate jacobian if no matrix is given */
    if (calc_jac == 1 && solverData->calculate_jacobian >= 0)
    {
      (*f)(n, x, fvec, userdata, 0);
      solverData->factorization = 0;
      calc_jac = solverData->calculate_jacobian;
    }
    else
    {
      solverData->factorization = 1;
      calc_jac--;
    }


    /* debug output */
    if(ACTIVE_STREAM(LOG_NLS_JAC))
    {
      char buffer[4096];

      infoStreamPrint(LOG_NLS_JAC, 1, "jacobian matrix [%dx%d]", (int)*n, (int)*n);
      for(i=0; i<solverData->n;i++)
      {
        buffer[0] = 0;
        /* append at the end of buffer; passing buffer as both destination
           and %s source is undefined behavior */
        for(j=0; j<solverData->n; j++)
          sprintf(buffer + strlen(buffer), "%10g ", fjac[i*(*n)+j]);
        infoStreamPrint(LOG_NLS_JAC, 0, "%s", buffer);
      }
      messageClose(LOG_NLS_JAC);
    }

    if (solveLinearSystem(n, iwork, fvec, fjac, solverData) != 0)
    {
      *info=-1;
      break;
    }
    else
    {
      for (i = 0; i < *n; i++)
        solverData->x_new[i] = x[i] - solverData->x_increment[i];

      infoStreamPrint(LOG_NLS_V, 1, "x_increment");
      for(i=0; i<*n; i++)
        infoStreamPrint(LOG_NLS_V, 0, "x_increment[%d] = %e ", i, solverData->x_increment[i]);
      messageClose(LOG_NLS_V);

      if (solverData->newtonStrategy == NEWTON_DAMPED)
      {
        damping_heuristic(x, f, current_fvec_enorm, n, fvec, &lambda, &k, solverData, userdata);
      }
      else if (solverData->newtonStrategy == NEWTON_DAMPED2)
      {
        damping_heuristic2(0.75, x, f, current_fvec_enorm, n, fvec, &k, solverData, userdata);
      }
      else if (solverData->newtonStrategy == NEWTON_DAMPED_LS)
      {
        LineSearch(x, f, current_fvec_enorm, n, fvec, &k, solverData, userdata);
      }
      else if (solverData->newtonStrategy == NEWTON_DAMPED_BT)
      {
        Backtracking(x, f, current_fvec_enorm, n, fvec, solverData, userdata);
      }
      else
      {
        /* calculate the function values */
        (*f)(n, solverData->x_new, fvec, userdata, 1);
        solverData->nfev++;
      }

      calculatingErrors(solverData, &delta_x, &delta_x_scaled, &delta_f, &error_f, &scaledError_f, n, x, fvec);

      /* updating x */
      memcpy(x, solverData->x_new, *n*sizeof(double));

      /* updating f_old */
      memcpy(solverData->f_old, fvec, *n*sizeof(double));

      current_fvec_enorm = error_f;

      /* check if maximum iteration is reached */
      if (++l > *maxfev)
      {
        *info = -1;
        warningStreamPrint(LOG_NLS_V, 0, "Warning: maximum number of iterations reached but no root found");
        break;
      }
    }

    if(ACTIVE_STREAM(LOG_NLS_V))
    {
      infoStreamPrint(LOG_NLS_V,1,"x vector");
      for(i=0; i<*n; i++)
        infoStreamPrint(LOG_NLS_V, 0, "x[%d] = %e ", i, x[i]);
      messageClose(LOG_NLS_V);
      printErrors(delta_x, delta_x_scaled, delta_f, error_f, scaledError_f, eps);
    }
  }

  solverData->numberOfIterations  += l;
  solverData->numberOfFunctionEvaluations += solverData->nfev;

  return 0;
}
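
The damping branches above (damping_heuristic, damping_heuristic2, Backtracking) all guard the raw Newton step x_new = x - dx against divergence. A minimal sketch of that idea with hypothetical names, halving a damping factor lambda until the residual norm decreases:

#include <cmath>
#include <vector>

// Euclidean norm of a residual vector.
static double enormSketch(const std::vector<double>& v) {
  double s = 0.0;
  for (double e : v) s += e * e;
  return std::sqrt(s);
}

// Try x_new = x - lambda*dx, halving lambda until ||f(x_new)|| < ||f(x)||
// or lambda drops below lambdaMin; returns whether a decrease was found.
// Residual is any callable f(x, fvec) that fills the residual vector.
template <class Residual>
bool DampedStep(Residual f, std::vector<double>& x,
                const std::vector<double>& dx, double fnorm0,
                double lambdaMin = 1e-4) {
  std::vector<double> xTrial(x.size()), fTrial(x.size());
  for (double lambda = 1.0; lambda >= lambdaMin; lambda *= 0.5) {
    for (size_t i = 0; i < x.size(); ++i)
      xTrial[i] = x[i] - lambda * dx[i];
    f(xTrial, fTrial);
    if (enormSketch(fTrial) < fnorm0) {
      x = xTrial;  // accept the damped step
      return true;
    }
  }
  return false;  // no damping factor gave a decrease
}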
Example #5
static real FindMinimum(This *t, cBounds *b, real *xmin, real fmin)
{
  Vector(real, hessian, NDIM*NDIM);
  Vector(real, gfree, NDIM);
  Vector(real, p, NDIM);
  Vector(real, tmp, NDIM);
  Vector(count, ifree, NDIM);
  Vector(count, ifix, NDIM);
  real ftmp, fini = fmin;
  ccount maxeval = t->neval + 50*t->ndim;
  count nfree, nfix;
  count dim, local;

  Clear(hessian, t->ndim*t->ndim);
  for( dim = 0; dim < t->ndim; ++dim )
    Hessian(dim, dim) = 1;

  /* Step 1: - classify the variables as "fixed" (sufficiently close
               to a border) and "free",
             - if the integrand is flat in the direction of the gradient
               w.r.t. the free dimensions, perform a local search. */

  for( local = 0; local < 2; ++local ) {
    bool resample = false;
    nfree = nfix = 0;
    for( dim = 0; dim < t->ndim; ++dim ) {
      if( xmin[dim] < b[dim].lower + (1 + fabsx(b[dim].lower))*QEPS ) {
        xmin[dim] = b[dim].lower;
        ifix[nfix++] = dim;
        resample = true;
      }
      else if( xmin[dim] > b[dim].upper - (1 + fabsx(b[dim].upper))*QEPS ) {
        xmin[dim] = b[dim].upper;
        ifix[nfix++] = Tag(dim);
        resample = true;
      }
      else ifree[nfree++] = dim;
    }

    if( resample ) fini = fmin = Sample(t, xmin);

    if( nfree == 0 ) goto releasebounds;

    Gradient(t, nfree, ifree, b, xmin, fmin, gfree);
    if( local || Length(nfree, gfree) > GTOL ) break;

    ftmp = LocalSearch(t, nfree, ifree, b, xmin, fmin, tmp);
    if( ftmp > fmin - (1 + fabsx(fmin))*RTEPS )
      goto releasebounds;
    fmin = ftmp;
    XCopy(xmin, tmp);
  }

  while( t->neval <= maxeval ) {

    /* Step 2a: perform a quasi-Newton iteration on the free
                variables only. */

    if( nfree > 0 ) {
      real plen, pleneps;
      real minstep;
      count i, mini = 0, minfix = 0;
      Point low;

      LinearSolve(t, nfree, hessian, gfree, p);
      plen = Length(nfree, p);
      pleneps = plen + RTEPS;

      minstep = INFTY;
      for( i = 0; i < nfree; ++i ) {
        count dim = Untag(ifree[i]);
        if( fabsx(p[i]) > EPS ) {
          real step;
          count fix;
          if( p[i] < 0 ) {
            step = (b[dim].lower - xmin[dim])/p[i];
            fix = dim;
          }
          else {
            step = (b[dim].upper - xmin[dim])/p[i];
            fix = Tag(dim);
          }
          if( step < minstep ) {
            minstep = step;
            mini = i;
            minfix = fix;
          }
        }
      }

      if( minstep*pleneps <= DELTA ) {
fixbound:
        ifix[nfix++] = minfix;

        if( mini < --nfree ) {
          creal diag = Hessian(mini, mini);

          Clear(tmp, mini);
          for( i = mini; i < nfree; ++i )
            tmp[i] = Hessian(i + 1, mini);

          for( i = mini; i < nfree; ++i ) {
            Move(&Hessian(i, 0), &Hessian(i + 1, 0), i);
            Hessian(i, i) = Hessian(i + 1, i + 1);
          }
          RenormalizeCholesky(t, nfree, hessian, tmp, diag);

          Move(&ifree[mini], &ifree[mini + 1], nfree - mini);
          Move(&gfree[mini], &gfree[mini + 1], nfree - mini);
        }
        continue;
      }

      low = LineSearch(t, nfree, ifree, p, xmin, fmin, tmp,
        Min(minstep, 1.), Min(minstep, 100.), Dot(nfree, gfree, p),
        RTEPS/pleneps, DELTA/pleneps, .2);

      if( low.dx > 0 ) {
        real fdiff;

        fmin = low.f;
        XCopy(xmin, tmp);

        Gradient(t, nfree, ifree, b, xmin, fmin, tmp);
        BFGS(t, nfree, hessian, tmp, gfree, p, low.dx);
        XCopy(gfree, tmp);

        if( fabsx(low.dx - minstep) < QEPS*minstep ) goto fixbound;

        fdiff = fini - fmin;
        fini = fmin;
        if( fdiff > (1 + fabsx(fmin))*FTOL ||
            low.dx*plen > (1 + Length(t->ndim, xmin))*FTOL ) continue;
      }
    }

    /* Step 2b: check whether freeing any fixed variable will lead
                to a reduction in f. */

releasebounds:
    if( nfix > 0 ) {
      real mingrad = INFTY;
      count i, mini = 0;
      bool repeat = false;

      Gradient(t, nfix, ifix, b, xmin, fmin, tmp);

      for( i = 0; i < nfix; ++i ) {
        creal grad = Sign(ifix[i])*tmp[i];
        if( grad < -RTEPS ) {
          repeat = true;
          if( grad < mingrad ) {
            mingrad = grad;
            mini = i;
          }
        }
      }

      if( repeat ) {
        gfree[nfree] = tmp[mini];
        ifree[nfree] = Untag(ifix[mini]);
        Clear(&Hessian(nfree, 0), nfree);
        Hessian(nfree, nfree) = 1;
        ++nfree;

        --nfix;
        Move(&ifix[mini], &ifix[mini + 1], nfix - mini);
        continue;
      }
    }

    break;
  }

  return fmin;
}
Example #6
static real LocalSearch(This *t, ccount nfree, ccount *ifree,
  cBounds *b, creal *x, creal fx, real *z)
{
  Vector(real, y, NDIM);
  Vector(real, p, NDIM);
  real delta, smax, sopp, spmax, snmax;
  real fy, fz, ftest;
  int sign;
  count i;

  /* Choose a direction p along which to move away from the
     present x.  We choose the direction which leads farthest
     away from all borders. */

  smax = INFTY;
  for( i = 0; i < nfree; ++i ) {
    ccount dim = ifree[i];
    creal sp = b[dim].upper - x[dim];
    creal sn = x[dim] - b[dim].lower;
    if( sp < sn ) {
      smax = Min(smax, sn);
      p[i] = -1;
    }
    else {
      smax = Min(smax, sp);
      p[i] = 1;
    }
  }
  smax *= .9;

  /* Move along p until the integrand changes appreciably
     or we come close to a border. */

  XCopy(y, x);
  ftest = SUFTOL*(1 + fabsx(fx));
  delta = RTDELTA/5;
  do {
    delta = Min(5*delta, smax);
    for( i = 0; i < nfree; ++i ) {
      ccount dim = ifree[i];
      y[dim] = x[dim] + delta*p[i];
    }
    fy = Sample(t, y);
    if( fabsx(fy - fx) > ftest ) break;
  } while( delta != smax );

  /* Construct a second direction p' orthogonal to p, i.e. p.p' = 0.
     We let pairs of coordinates cancel in the dot product,
     i.e. we choose p'[0] = p[0], p'[1] = -p[1], etc.
     (It should really be 1/p and -1/p, but p consists of 1's and -1's.)
     For odd nfree, we let the last three components cancel by 
     choosing p'[nfree - 3] = p[nfree - 3],
              p'[nfree - 2] = -1/2 p[nfree - 2], and
              p'[nfree - 1] = -1/2 p[nfree - 1]. */

  sign = (nfree <= 1 && fy > fx) ? 1 : -1;
  spmax = snmax = INFTY;
  for( i = 0; i < nfree; ++i ) {
    ccount dim = ifree[i];
    real sp, sn;
    p[i] *= (nfree & 1 && nfree - i <= 2) ? -.5*sign : (sign = -sign);
    sp = (b[dim].upper - y[dim])/p[i];
    sn = (y[dim] - b[dim].lower)/p[i];
    if( p[i] > 0 ) {
      spmax = Min(spmax, sp);
      snmax = Min(snmax, sn);
    }
    else {
      spmax = Min(spmax, -sn);
      snmax = Min(snmax, -sp);
    }
  }
  smax = .9*spmax;
  sopp = .9*snmax;

  if( nfree > 1 && smax < snmax ) {
    real tmp = smax;
    smax = sopp;
    sopp = tmp;
    for( i = 0; i < nfree; ++i )
      p[i] = -p[i];
  }

  /* Move along p' until the integrand changes appreciably
     or we come close to a border. */

  XCopy(z, y);
  ftest = SUFTOL*(1 + fabsx(fy));
  delta = RTDELTA/5;
  do {
    delta = Min(5*delta, smax);
    for( i = 0; i < nfree; ++i ) {
      ccount dim = ifree[i];
      z[dim] = y[dim] + delta*p[i];
    }
    fz = Sample(t, z);
    if( fabsx(fz - fy) > ftest ) break;
  } while( delta != smax );

  if( fy != fz ) {
    real pleneps, grad, range, step;
    Point low;

    if( fy > fz ) {
      grad = (fz - fy)/delta;
      range = smax/.9;
      step = Min(delta + delta, smax);
    }
    else {
      grad = (fy - fz)/delta;
      range = sopp/.9 + delta;
      step = Min(delta + delta, sopp);
      XCopy(y, z);
      fy = fz;
      for( i = 0; i < nfree; ++i )
        p[i] = -p[i];
    }

    pleneps = Length(nfree, p) + RTEPS;
    low = LineSearch(t, nfree, ifree, p, y, fy, z, step, range, grad,
      RTEPS/pleneps, 0., RTEPS);
    fz = low.f;
  }

  if( fz != fx ) {
    real pleneps, grad, range, step;
    Point low;

    spmax = snmax = INFTY;
    for( i = 0; i < nfree; ++i ) {
      ccount dim = ifree[i];
      p[i] = z[dim] - x[dim];
      if( p[i] != 0 ) {
        creal sp = (b[dim].upper - x[dim])/p[i];
        creal sn = (x[dim] - b[dim].lower)/p[i];
        if( p[i] > 0 ) {
          spmax = Min(spmax, sp);
          snmax = Min(snmax, sn);
        }
        else {
          spmax = Min(spmax, -sn);
          snmax = Min(snmax, -sp);
        }
      }
    }

    grad = fz - fx;
    range = spmax;
    step = Min(.9*spmax, 2.);
    pleneps = Length(nfree, p) + RTEPS;
    if( fz > fx ) {
      delta = Min(.9*snmax, RTDELTA/pleneps);
      for( i = 0; i < nfree; ++i ) {
        ccount dim = ifree[i];
        z[dim] = x[dim] - delta*p[i];
      }
      fz = Sample(t, z);
      if( fz < fx ) {
        grad = (fz - fx)/delta;
        range = snmax;
        step = Min(.9*snmax, delta + delta);
        for( i = 0; i < nfree; ++i )
          p[i] = -p[i];
      }
      else if( delta < 1 ) grad = (fx - fz)/delta;
    }

    low = LineSearch(t, nfree, ifree, p, x, fx, z, step, range, grad,
      RTEPS/pleneps, 0., RTEPS);
    fz = low.f;
  }

  return fz;
}
Example #7
bool
GaussNewton(Function& f,
            real_type t,
            Vector& x,
            real_type atol, real_type rtol,
            unsigned *itCount,
            unsigned maxit,
            unsigned maxjac,
            real_type lambdamin)
{
  Vector err, dx;
  Matrix J;
#define USE_QR
#ifdef USE_QR
  LinAlg::MatrixFactors<real_type,0,0,LinAlg::QRTag> jacFactors;
#else
  LinAlg::MatrixFactors<real_type,0,0,LinAlg::LUTag> jacFactors;
#endif

  bool converged;
  do {
    // Compute in each step a new jacobian
    f.jac(t, x, J);
    Log(NewtonMethod, Debug) << "Jacobian is:\n" << J << endl;
#ifdef USE_QR
    jacFactors = J;
#else
    jacFactors = trans(J)*J;
#endif
    Log(NewtonMethod, Debug) << "Jacobian is "
                             << (jacFactors.singular() ? "singular" : "ok")
                             << endl;
   
    // Compute the actual error
    f.eval(t, x, err);

    // Compute the search direction
#ifdef USE_QR
    dx = jacFactors.solve(err);
#else
    dx = jacFactors.solve(trans(J)*err);
#endif
    Log(NewtonMethod, Debug) << "dx residual "
                             << trans(J*dx - err) << endl
                             << trans(J*dx - err)*J
                             << endl;

    // Get a better search guess
    if (1 < norm(dx))
      dx = normalize(dx);
    Vector xnew = LineSearch(f, t, x, -dx, 1.0, atol);

    // Check convergence.
    converged = norm1(xnew - x) < atol;

    Log(NewtonMethod, Debug) << "Convergence test: |dx| = " << norm(xnew - x)
                             << ", converged = " << converged << endl;
    // New guess is the better one
    x = xnew;
  } while (!converged);

  return converged;
}
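
The LineSearch used by GaussNewton is not shown; a plausible minimal sketch for a least-squares residual, assuming a Function-like type with eval(t, x, err) and treating the halving factor as an arbitrary choice:

#include <Eigen/Dense>

// Shrink the step along 'dir' until the residual norm decreases.
// Func mirrors the Function interface above: f.eval(t, x, err) fills err.
template <class Func>
Eigen::VectorXd LineSearchSketch(Func& f, double t,
                                 const Eigen::VectorXd& x,
                                 const Eigen::VectorXd& dir,
                                 double alpha, double atol) {
  Eigen::VectorXd err;
  f.eval(t, x, err);
  const double norm0 = err.norm();
  while (alpha * dir.norm() > atol) {
    const Eigen::VectorXd xNew = x + alpha * dir;
    f.eval(t, xNew, err);
    if (err.norm() < norm0) return xNew;  // accept the first improving point
    alpha *= 0.5;                         // otherwise halve the step
  }
  return x;  // no improving step within tolerance
}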