Example #1
static double nloptObjectiveFunction(unsigned n, const double *x, double *grad, void *f_data)
{
	GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
	nlopt_opt opt = (nlopt_opt) goc->extraData;
	FitContext *fc = goc->fc;
	assert(n == fc->numParam);
	int mode = 0;
	double fit = goc->solFun((double*) x, &mode);
	if (grad) {
		fc->iterations += 1;
		if (goc->maxMajorIterations != -1 && fc->iterations >= goc->maxMajorIterations) {
			nlopt_force_stop(opt);
		}
	}
	if (grad && goc->verbose >= 2) {
		mxLog("major iteration fit=%.12f", fit);
	}
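	// mode == -1 signals that the fit could not be evaluated at this point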
	if (mode == -1) {
		if (!goc->feasible) {
			nlopt_force_stop(opt);
		}
		return nan("infeasible");
	}
	if (!grad) return fit;

	// NLOPT asked for a gradient: map x and grad as Eigen vectors, then fill
	// grad from the analytic gradient, a CI unit vector, or finite differences.
	Eigen::Map< Eigen::VectorXd > Epoint((double*) x, n);
	Eigen::Map< Eigen::VectorXd > Egrad(grad, n);
	if (fc->wanted & FF_COMPUTE_GRADIENT) {
		Egrad = fc->grad;
	} else if (fc->CI && fc->CI->varIndex >= 0) {
		// Confidence-interval fit: the objective is +/- one parameter,
		// so the gradient is a unit vector.
		Egrad.setZero();
		Egrad[fc->CI->varIndex] = fc->lowerBound ? 1 : -1;
		fc->grad = Egrad;
	} else {
		if (goc->verbose >= 3) mxLog("fd_gradient start");
		fit_functional ff(*goc);
		gradient_with_ref(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
				  ff, fit, Epoint, Egrad);
		fc->grad = Egrad;
	}
	if (goc->verbose >= 3) {
		mxPrintMat("gradient", Egrad);
	}
	return fit;
}
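The function above implements NLOPT's objective-callback contract: NLOPT calls f(n, x, grad, f_data), signals a gradient-based step by passing a non-NULL grad, and nlopt_force_stop() is how the callback aborts the run. As a point of reference, here is a minimal self-contained sketch of that same contract against the plain NLOPT C API; the sphere objective and the other names in it are invented for illustration and are not part of OpenMx.

#include <cmath>
#include <cstdio>
#include <nlopt.h>

// Toy objective with the signature NLOPT expects: f(x) = x0^2 + x1^2 + ...
// A non-NULL grad means the optimizer also wants the gradient at x.
static double sphere(unsigned n, const double *x, double *grad, void *)
{
    double fit = 0;
    for (unsigned i = 0; i < n; ++i) {
        fit += x[i] * x[i];
        if (grad) grad[i] = 2 * x[i];  // analytic gradient
    }
    return fit;
}

int main()
{
    double x[2] = {3, -4};
    double fit = NAN;
    nlopt_opt opt = nlopt_create(NLOPT_LD_SLSQP, 2);
    nlopt_set_min_objective(opt, sphere, NULL);
    nlopt_set_ftol_rel(opt, 1e-8);
    nlopt_result code = nlopt_optimize(opt, x, &fit);
    std::printf("code=%d fit=%g at (%g, %g)\n", (int)code, fit, x[0], x[1]);
    nlopt_destroy(opt);
}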
Example #2
void omxSD(GradientOptimizerContext &rf)
{
    int maxIter = rf.maxMajorIterations;
    if (maxIter == -1) maxIter = 50000;

    Eigen::VectorXd currEst(rf.numFree);
    rf.copyToOptimizer(currEst.data());

    int iter = 0;
    double priorSpeed = 1.0, shrinkage = 0.7;
    rf.setupSimpleBounds();
    rf.informOut = INFORM_UNINITIALIZED;

    // Reject infeasible starting values before iterating.
    {
        int mode = 0;
        rf.solFun(currEst.data(), &mode);
        if (mode == -1) {
            rf.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
            return;
        }
    }
    double refFit = rf.getFit();

    rf.grad.resize(rf.numFree);

    fit_functional ff(rf);
    Eigen::VectorXd majorEst = currEst;

    while (++iter < maxIter && !isErrorRaised()) {
        gradient_with_ref(rf.gradientAlgo, 1, rf.gradientIterations, rf.gradientStepSize,
                          ff, refFit, majorEst, rf.grad);

        if (rf.verbose >= 3) mxPrintMat("grad", rf.grad);

        if (rf.grad.norm() == 0) {
            rf.informOut = INFORM_CONVERGED_OPTIMUM;
            if (rf.verbose >= 2) mxLog("After %i iterations, gradient is exactly zero", iter);
            break;
        }

        // Backtracking line search along the normalized gradient: shrink the
        // step until the fit improves or the retries run out.
        int retries = 300;
        double speed = std::min(priorSpeed, 1.0);
        double bestSpeed = speed;
        bool foundBetter = false;
        Eigen::VectorXd bestEst(majorEst.size());
        Eigen::VectorXd prevEst(majorEst.size());
        Eigen::VectorXd searchDir = rf.grad;
        searchDir /= searchDir.norm();
        prevEst.setConstant(nan("uninit"));  // NaN != NaN, so the first comparison never matches
        while (--retries > 0 && !isErrorRaised()) {
            Eigen::VectorXd nextEst = majorEst - speed * searchDir;
            nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);

            if (nextEst == prevEst) break;  // clamped to the same point; no progress possible
            prevEst = nextEst;

            rf.checkActiveBoxConstraints(nextEst);

            int mode = 0;
            double fit = rf.solFun(nextEst.data(), &mode);
            if (fit < refFit) {
                foundBetter = true;
                refFit = rf.getFit();
                bestSpeed = speed;
                bestEst = nextEst;
                break;
            }
            speed *= shrinkage;
        }

        if (false && foundBetter) {
            // In some tests, this did not help so it is not enabled.
            // It might be worth testing more.
            mxLog("trying larger step size");
            retries = 3;
            while (--retries > 0 && !isErrorRaised()) {
                speed *= 1.01;
                Eigen::VectorXd nextEst = majorEst - speed * searchDir;
                nextEst = nextEst.cwiseMax(rf.solLB).cwiseMin(rf.solUB);
                rf.checkActiveBoxConstraints(nextEst);
                int mode = 0;
                double fit = rf.solFun(nextEst.data(), &mode);
                if (fit < refFit) {
                    foundBetter = true;
                    refFit = rf.getFit();
                    bestSpeed = speed;
                    bestEst = nextEst;
                }
            }
        }

        if (!foundBetter) {
            rf.informOut = INFORM_CONVERGED_OPTIMUM;
            if (rf.verbose >= 2) mxLog("After %i iterations, no better point was found along the gradient direction", iter);
            break;
        }

        if (rf.verbose >= 2) mxLog("major fit %f bestSpeed %g", refFit, bestSpeed);
        majorEst = bestEst;
        priorSpeed = bestSpeed * 1.1;  // be slightly more aggressive next time
    }
    rf.est = majorEst;
    if ((rf.grad.array().abs() > 0.1).any()) {
        rf.informOut = INFORM_NOT_AT_OPTIMUM;
    }
    if (iter >= maxIter - 1) {
        rf.informOut = INFORM_ITERATION_LIMIT;
        if (rf.verbose >= 2) mxLog("Maximum iterations reached");
    }
    if (rf.verbose >= 1) mxLog("Status code : %i", rf.informOut);
}
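Stripped of logging, bound clamping, and the disabled step-growing branch, the inner loop above is a plain backtracking line search: step along the normalized gradient, multiply the step by 0.7 on failure, accept the first improving point, and start the next major iteration at 1.1 times the accepted step. A minimal sketch of that rule follows; backtrackStep and fitFn are invented names standing in for rf.solFun and friends, not part of the codebase.

#include <algorithm>
#include <functional>
#include <Eigen/Core>

// One major iteration's line search, as in omxSD: returns true and updates
// est/refFit/priorSpeed when an improving point is found along -grad.
static bool backtrackStep(const std::function<double(const Eigen::VectorXd &)> &fitFn,
                          Eigen::VectorXd &est, const Eigen::VectorXd &grad,
                          double &refFit, double &priorSpeed,
                          int retries = 300, double shrinkage = 0.7)
{
    Eigen::VectorXd dir = grad / grad.norm();
    double speed = std::min(priorSpeed, 1.0);  // never start above a unit step
    while (retries-- > 0) {
        Eigen::VectorXd next = est - speed * dir;
        double fit = fitFn(next);
        if (fit < refFit) {            // accept the first improvement
            refFit = fit;
            est = next;
            priorSpeed = speed * 1.1;  // be slightly bolder next time
            return true;
        }
        speed *= shrinkage;            // back off and retry
    }
    return false;                      // no improvement: omxSD treats this as converged
}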
Example #3
void omxInvokeNLOPT(double *est, GradientOptimizerContext &goc)
{
    goc.optName = "SLSQP";
    goc.setupSimpleBounds();
    goc.useGradient = true;

    FitContext *fc = goc.fc;
    int oldWanted = fc->wanted;
    fc->wanted = 0;
    omxState *globalState = fc->state;

    nlopt_opt opt = nlopt_create(NLOPT_LD_SLSQP, fc->numParam);
    goc.extraData = opt;
    //local_opt = nlopt_create(NLOPT_LD_SLSQP, n); // Subsidiary algorithm
    //nlopt_set_local_optimizer(opt, local_opt);
    nlopt_set_lower_bounds(opt, goc.solLB.data());
    nlopt_set_upper_bounds(opt, goc.solUB.data());

    int eq, ieq;
    globalState->countNonlinearConstraints(eq, ieq);

    if (fc->CI) {
        nlopt_set_xtol_rel(opt, 5e-3);
        std::vector<double> tol(fc->numParam, std::numeric_limits<double>::epsilon());
        nlopt_set_xtol_abs(opt, tol.data());
    } else {
        // The *2 is there to roughly equate accuracy with NPSOL.
        nlopt_set_ftol_rel(opt, goc.ControlTolerance * 2);
        nlopt_set_ftol_abs(opt, std::numeric_limits<double>::epsilon());
    }

    nlopt_set_min_objective(opt, SLSQP::nloptObjectiveFunction, &goc);

    double feasibilityTolerance = Global->feasibilityTolerance;
    SLSQP::context ctx(goc);
    if (eq + ieq) {
        ctx.origeq = eq;
        if (ieq > 0) {
            goc.inequality.resize(ieq);
            std::vector<double> tol(ieq, feasibilityTolerance);
            nlopt_add_inequality_mconstraint(opt, ieq, SLSQP::nloptInequalityFunction, &goc, tol.data());
        }

        if (eq > 0) {
            goc.equality.resize(eq);
            std::vector<double> tol(eq, feasibilityTolerance);
            nlopt_add_equality_mconstraint(opt, eq, SLSQP::nloptEqualityFunction, &ctx, tol.data());
        }
    }

    int priorIterations = fc->iterations;

    int code = nlopt_optimize(opt, est, &fc->fit);
    if (ctx.eqredundent) {
        // Drop redundant equality constraints and retry the optimization.
        nlopt_remove_equality_constraints(opt);
        eq -= ctx.eqredundent;
        std::vector<double> tol(eq, feasibilityTolerance);
        nlopt_add_equality_mconstraint(opt, eq, SLSQP::nloptEqualityFunction, &ctx, tol.data());

        code = nlopt_optimize(opt, est, &fc->fit);
    }

    if (goc.verbose >= 2) mxLog("nlopt_optimize returned %d", code);

    nlopt_destroy(opt);

    fc->wanted = oldWanted;

    // Map NLOPT return codes onto the optimizer's inform codes.
    if (code == NLOPT_INVALID_ARGS) {
        Rf_error("NLOPT invoked with invalid arguments");
    } else if (code == NLOPT_OUT_OF_MEMORY) {
        Rf_error("NLOPT ran out of memory");
    } else if (code == NLOPT_FORCED_STOP) {
        if (fc->iterations - priorIterations <= 1) {
            goc.informOut = INFORM_STARTING_VALUES_INFEASIBLE;
        } else {
            goc.informOut = INFORM_ITERATION_LIMIT;
        }
    } else if (code == NLOPT_ROUNDOFF_LIMITED) {
        if (fc->iterations - priorIterations <= 2) {
            Rf_error("%s: Failed due to singular matrix E or C in LSQ subproblem or "
                     "rank-deficient equality constraint subproblem or "
                     "positive directional derivative in line search", goc.optName);
        } else {
            goc.informOut = INFORM_NOT_AT_OPTIMUM;  // is this correct? TODO
        }
    } else if (code < 0) {
        Rf_error("NLOPT fatal error %d", code);
    } else if (code == NLOPT_MAXEVAL_REACHED) {
        goc.informOut = INFORM_ITERATION_LIMIT;
    } else {
        goc.informOut = INFORM_CONVERGED_OPTIMUM;
    }
}
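The constraint setup above uses NLOPT's vector-valued ("mconstraint") interface: one callback evaluates all m constraint components at once, each with its own feasibility tolerance, and grad (when non-NULL) receives the m-by-n Jacobian in row-major order. Below is a minimal sketch of that interface with two invented inequality constraints; it is illustrative only and is not OpenMx's SLSQP::nloptInequalityFunction.

#include <nlopt.h>

// Two invented inequality constraints c(x) <= 0:
//   c1 = x0 + x1 - 1,   c2 = -x0
// When grad is non-NULL it holds the m-by-n Jacobian, row-major.
static void toyInequality(unsigned m, double *result, unsigned n,
                          const double *x, double *grad, void *)
{
    result[0] = x[0] + x[1] - 1.0;
    result[1] = -x[0];
    if (grad) {
        grad[0*n + 0] =  1.0;  grad[0*n + 1] = 1.0;  // dc1/dx
        grad[1*n + 0] = -1.0;  grad[1*n + 1] = 0.0;  // dc2/dx
    }
}

// Registration mirrors the call in omxInvokeNLOPT:
//   double tol[2] = {1e-8, 1e-8};
//   nlopt_add_inequality_mconstraint(opt, 2, toyInequality, NULL, tol);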