Example #1
int OptCGLike::checkConvg() // check convergence
{
  NLP1* nlp = nlprob();
  ColumnVector xc(nlp->getXc());

// Test 1. step tolerance 

  double step_tol = tol.getStepTol();
  double snorm = stepTolNorm();
  double xnorm =  Norm2(xc);
  double stol  = step_tol*max(1.0,xnorm);
  if (snorm  <= stol) {
    strcpy(mesg,"Algorithm converged - Norm of last step is less than step tolerance");
    *optout << "checkConvg: snorm = " << e(snorm,12,4) 
      << "  stol = " << e(stol,12,4) << "\n";
    return 1;
  }
  
// Test 2. function tolerance
  double ftol = tol.getFTol();
  double fvalue = nlp->getF();
  double rftol = ftol*max(1.0,fabs(fvalue));
  double deltaf = fprev - fvalue;
  if (deltaf <= rftol) {
    strcpy(mesg,"Algorithm converged - Difference in successive fcn values less than tolerance");
    *optout << "checkConvg: deltaf = " << e(deltaf,12,4) 
         << "  ftol = " << e(ftol,12,4) << "\n";
    return 2;
  }
  

// Test 3. gradient tolerance 

  ColumnVector grad(nlp->getGrad());
  double gtol = tol.getGTol();
  double rgtol = gtol*max(1.0,fabs(fvalue));
  double gnorm = Norm2(grad);
  if (gnorm <= rgtol) {
    strcpy(mesg,"Algorithm converged - Norm of gradient is less than gradient tolerance");
    *optout << "checkConvg: gnorm = " << e(gnorm,12,4) 
      << "  gtol = " << e(rgtol, 12,4) << "\n";
    return 3;
  }
  

// Test 4. absolute gradient tolerance 

  if (gnorm <= gtol) {
    strcpy(mesg,"Algorithm converged - Norm of gradient is less than absolute gradient tolerance");
    *optout << "checkConvg: gnorm = " << e(gnorm,12,4) 
      << "  gtol = " << e(gtol, 12,4) << "\n";
    return 4;
  }
  
  // Nothing to report 

  return 0;

}
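
The three relative tests above share one pattern: the raw tolerance is scaled by max(1.0, |reference magnitude|), so the same settings behave sensibly whether the iterates and function values are large or near zero, where the test degrades gracefully to an absolute one. A minimal standalone sketch of that pattern, assuming only the standard library (relTolSatisfied and its parameters are illustrative names, not part of OPT++):

#include <algorithm>
#include <cmath>

// Returns true when |value| falls below tol scaled relative to a
// reference magnitude, i.e. tol * max(1, |ref|) -- the same form as
// the step, function, and gradient tests in checkConvg() above.
static bool relTolSatisfied(double value, double ref, double tol)
{
  return std::fabs(value) <= tol * std::max(1.0, std::fabs(ref));
}

For example, the step test corresponds to relTolSatisfied(snorm, xnorm, step_tol), and the relative gradient test to relTolSatisfied(gnorm, fvalue, gtol).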
Example #2
//------------------------------------------------------------------------
// checkConvg - check whether the distance between upper and lower bounds
//              is less than a prescribed threshold.
//------------------------------------------------------------------------
int OptBCEllipsoid::checkConvg() 
{
  NLP1         *nlp = nlprob();
  SerialDenseVector<int,double> xc(nlp->getXc());
  double       fvalue = nlp->getF();
  double       ftol = tol.getFTol();
  double       delta;

  fval_upbound  = min(fval_upbound,fvalue);
  delta = fabs(fval_upbound - fval_lowbound);
  if (delta <= ftol) {
    strcpy(mesg,"Algorithm converged - Gap between upper and lower bounds on fcn value is less than tolerance");
    ret_code = 2;
    setReturnCode(ret_code);
    return 1;
  } else 
    return 0;
}
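
This test relies on fval_upbound and fval_lowbound bracketing the optimal value: fval_upbound tracks the best objective evaluated so far, while fval_lowbound (presumably maintained elsewhere by the ellipsoid machinery) bounds the optimum from below, so once the bracket is narrower than ftol the best point found is within ftol of optimal. A minimal sketch of the bracketing test in isolation (FvalBracket and its members are illustrative names, not OPT++ API):

#include <cfloat>
#include <cmath>

// The true minimum f* always lies in [lower, upper], so the width of
// the bracket bounds the error of the best point found so far.
struct FvalBracket {
  double lower = -FLT_MAX;  // lower bound on f*
  double upper =  FLT_MAX;  // best objective value seen so far

  void update(double fvalue)        { if (fvalue < upper) upper = fvalue; }
  bool converged(double ftol) const { return std::fabs(upper - lower) <= ftol; }
};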
Example #3
//----------------------------------------------------------------------------
// Given a nonlinear operator nlp, find the minimizer using the
// ellipsoid method.
//----------------------------------------------------------------------------
void OptBCEllipsoid::optimize()
{
  NLP1*  nlp = nlprob();
  int convgd = 0;
  int       i,n = nlp->getDim(), step_type;
  SerialDenseVector<int,double> xc(nlp->getXc().length()),xscale(getXScale().length()),xs(n);
  xc = nlp->getXc();
  xscale = getXScale();
  double          psi, dtmp;

  // Read input file and output initial message to file 
  initOpt();

  if (ret_code == 0) {
    iter_taken = 0;

    // Initialize convergence test variables 
    fval_lowbound = -FLT_MAX;
    fval_upbound  = FLT_MAX;

    // Initialize the A matrix
    SerialSymDenseMatrix<int,double> A(n);
    if (xscal_flag != 1) {xscale.resize(n); xscale = 1.0;}
    dtmp = initial_radius * initial_radius;
    A = 0.0;
    for (i=0; i<n; i++) A(i,i) = dtmp / (xscale(i) * xscale(i));

    // scale the initial guess (if scaling is desired)
    for (i=0; i<n; i++) xc(i) = xc(i) / xscale(i);

    // declare other vectors used in the iterations
    SerialDenseVector<int,double> ghk(n), aghk(n), aghkscal(n);

    // assuming that the function has been evaluated, get the value
    fprev = nlp->getF();

    // Move the initial guess into the feasible region, if needed
    for (i=0; i<n; i++) xs(i) = xc(i) * xscale(i);
    psi = computeFeasibility(xs);
    if (psi > 0.0) infeasibilityStep(xc,A,psi);

    while (convgd == 0) { 

      iter_taken++;
      //*optout << " **** OptBCEllipsoid : iteration count = " 
      //	 << iter_taken << "\n";

      // put away the last solution to prepare for current iteration 
      xprev = nlp->getXc();

      // perform one ellipsoid iteration (xc,A changed upon return)
      step_type = halfSpaceStep(xc,A,psi);

      // if the next solution is infeasible, do deep cut
      if (step_type == -1) infeasibilityStep(xc,A,psi);

      // update solution and update function value
      for (i=0; i<n; i++) xs(i) = xc(i) * xscale(i);
      nlp->setX(xs);
      fprev = nlp->evalF();

      // check convergence
      acceptStep(iter_taken, 0);
      convgd = checkConvg();

      // debug  information - volume of ellipsoid
      //logdeterminant = A.LogDeterminant();
      //dtmp = 0.5 * n + 1.0;
      //determinant = sqrt(logdeterminant.Value()) * pow(pi,dtmp-1.0) 
      //					       / ComputeGamma(dtmp);
      //*optout << "Volume of current ellipsoid = " << determinant << "\n";
    }
  }
}
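
halfSpaceStep(xc,A,psi) is the heart of the loop above: it cuts the current ellipsoid {x : (x-c)' inv(A) (x-c) <= 1} with a half-space through its center and replaces it with the smallest ellipsoid containing the retained half. A standalone sketch of the classical central-cut update, which is presumably the kind of step halfSpaceStep performs (plain std::vector stands in for the Teuchos types; centralCutUpdate is an illustrative name, not OPT++ API):

#include <cmath>
#include <vector>

// Classical central-cut ellipsoid update. The ellipsoid is
// {x : (x-c)' inv(A) (x-c) <= 1}; g is the cut direction (e.g. the
// gradient at the center). Assumes n >= 2, A symmetric positive
// definite, and g nonzero.
void centralCutUpdate(std::vector<double>& c,              // center, length n
                      std::vector<std::vector<double>>& A, // n x n, symmetric
                      const std::vector<double>& g)
{
  const int n = static_cast<int>(c.size());

  // b = A*g / sqrt(g'*A*g) is the step direction induced by the cut.
  std::vector<double> Ag(n, 0.0);
  double gAg = 0.0;
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) Ag[i] += A[i][j] * g[j];
    gAg += g[i] * Ag[i];
  }
  const double s = std::sqrt(gAg);

  // Shift the center against the cut: c <- c - b/(n+1).
  const double nn = static_cast<double>(n);
  for (int i = 0; i < n; i++) c[i] -= Ag[i] / (s * (nn + 1.0));

  // Shrink: A <- n^2/(n^2-1) * (A - 2/(n+1) * b*b').
  const double scale = nn * nn / (nn * nn - 1.0);
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      A[i][j] = scale * (A[i][j] - 2.0 / (nn + 1.0) * Ag[i] * Ag[j] / gAg);
}

Each such update shrinks the ellipsoid's volume by a factor that depends only on n, which is why the volume printout in the commented-out debug code is a natural progress measure.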