static double nloptObjectiveFunction(unsigned n, const double *x, double *grad, void *f_data)
{
    GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
    nlopt_opt opt = (nlopt_opt) goc->extraData;
    FitContext *fc = goc->fc;
    assert(n == fc->numParam);

    int mode = 0;
    double fit = goc->solFun((double*) x, &mode);

    // A non-NULL grad marks a major iteration; enforce the iteration limit here.
    if (grad) {
        fc->iterations += 1;
        if (goc->maxMajorIterations != -1 &&
            fc->iterations >= goc->maxMajorIterations) {
            nlopt_force_stop(opt);
        }
    }
    if (grad && goc->verbose >= 2) {
        mxLog("major iteration fit=%.12f", fit);
    }
    if (mode == -1) {
        // Infeasible point: stop unless the context has already seen a feasible one.
        if (!goc->feasible) {
            nlopt_force_stop(opt);
        }
        return nan("infeasible");
    }
    if (!grad) return fit;

    Eigen::Map< Eigen::VectorXd > Epoint((double*) x, n);
    Eigen::Map< Eigen::VectorXd > Egrad(grad, n);
    if (fc->wanted & FF_COMPUTE_GRADIENT) {
        // Analytic gradient already available from the fit function.
        Egrad = fc->grad;
    } else if (fc->CI && fc->CI->varIndex >= 0) {
        // Confidence interval on a single free parameter: the gradient is +/-1
        // on that parameter and zero elsewhere.
        Egrad.setZero();
        Egrad[fc->CI->varIndex] = fc->lowerBound? 1 : -1;
        fc->grad = Egrad;
    } else {
        // Otherwise fall back to a finite-difference gradient.
        if (goc->verbose >= 3) mxLog("fd_gradient start");
        fit_functional ff(*goc);
        gradient_with_ref(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
                          ff, fit, Epoint, Egrad);
        fc->grad = Egrad;
    }
    if (goc->verbose >= 3) { mxPrintMat("gradient", Egrad); }
    return fit;
}
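
// omxSD: a plain steepest-descent optimizer. Each major iteration evaluates a
// gradient at the current estimate, normalizes it, and backtracks along that
// direction (shrinking the step by `shrinkage`) until a lower fit is found or
// the retry budget is exhausted. Summary inferred from the code below.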
void omxSD(GradientOptimizerContext &rf)
{
    int maxIter = rf.maxMajorIterations;
    if (maxIter == -1) maxIter = 50000;

    Eigen::VectorXd currEst(rf.numFree);
    rf.copyToOptimizer(currEst.data());

    int iter = 0;
    double priorSpeed = 1.0, shrinkage = 0.7;
    rf.setupSimpleBounds();
    rf.informOut = INFORM_UNINITIALIZED;

    // Evaluate the starting values; bail out if they are infeasible.
    {
        int mode = 0;
        rf.solFun(currEst.data(), &mode);
        if (mode == -1) {
            rf.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
            return;
        }
    }
    double refFit = rf.getFit();

    rf.grad.resize(rf.numFree);

    fit_functional ff(rf);
    Eigen::VectorXd majorEst = currEst;

    while (++iter < maxIter && !isErrorRaised()) {
        // Gradient at the current estimate.
        gradient_with_ref(rf.gradientAlgo, 1, rf.gradientIterations, rf.gradientStepSize,
                          ff, refFit, majorEst, rf.grad);
        if (rf.verbose >= 3) mxPrintMat("grad", rf.grad);

        if (rf.grad.norm() == 0) {
            rf.informOut = INFORM_CONVERGED_OPTIMUM;
            if (rf.verbose >= 2) mxLog("After %i iterations, gradient achieves zero!", iter);
            break;
        }

        // Backtracking line search along the normalized gradient direction.
        int retries = 300;
        double speed = std::min(priorSpeed, 1.0);
        double bestSpeed = speed;
        bool foundBetter = false;
        Eigen::VectorXd bestEst(majorEst.size());
        Eigen::VectorXd prevEst(majorEst.size());
        Eigen::VectorXd searchDir = rf.grad;
        searchDir /= searchDir.norm();
        prevEst.setConstant(nan("uninit"));
        while (--retries > 0 && !isErrorRaised()) {
            Eigen::VectorXd nextEst = majorEst - speed * searchDir;
            nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
            if (nextEst == prevEst) break;  // step too small to move off the bounds
            prevEst = nextEst;
            rf.checkActiveBoxConstraints(nextEst);
            int mode = 0;
            double fit = rf.solFun(nextEst.data(), &mode);
            if (fit < refFit) {
                foundBetter = true;
                refFit = rf.getFit();
                bestSpeed = speed;
                bestEst = nextEst;
                break;
            }
            speed *= shrinkage;
        }
        if (false && foundBetter) {
            // In some tests, this did not help so it is not enabled.
            // It might be worth testing more.
            mxLog("trying larger step size");
            retries = 3;
            while (--retries > 0 && !isErrorRaised()) {
                speed *= 1.01;
                Eigen::VectorXd nextEst = majorEst - speed * searchDir;
                nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
                rf.checkActiveBoxConstraints(nextEst);
                int mode = 0;
                double fit = rf.solFun(nextEst.data(), &mode);
                if (fit < refFit) {
                    foundBetter = true;
                    refFit = rf.getFit();
                    bestSpeed = speed;
                    bestEst = nextEst;
                }
            }
        }
        if (!foundBetter) {
            rf.informOut = INFORM_CONVERGED_OPTIMUM;
            if (rf.verbose >= 2) mxLog("After %i iterations, cannot find a better estimate along the gradient direction", iter);
            break;
        }
        if (rf.verbose >= 2) mxLog("major fit %f bestSpeed %g", refFit, bestSpeed);
        majorEst = bestEst;
        priorSpeed = bestSpeed * 1.1;  // allow the step size to grow again
    }
    rf.est = majorEst;
    if ((rf.grad.array().abs() > 0.1).any()) {
        rf.informOut = INFORM_NOT_AT_OPTIMUM;
    }
    if (iter >= maxIter - 1) {
        rf.informOut = INFORM_ITERATION_LIMIT;
        if (rf.verbose >= 2) mxLog("Maximum iterations reached!");
    }
    if (rf.verbose >= 1) mxLog("Status code : %i", rf.informOut);
}