static double user_function(unsigned n, const double *x,
			    double *gradient, /* NULL if not needed */
			    void *d_)
{
  user_function_data *d = (user_function_data *) d_;
  double f;

  d->plhs[0] = d->plhs[1] = NULL;
  memcpy(mxGetPr(d->prhs[d->xrhs]), x, n * sizeof(double));

  CHECK0(0 == mexCallMATLAB(gradient ? 2 : 1, d->plhs, 
			   d->nrhs, d->prhs, d->f),
	"error calling user function");

  CHECK0(mxIsNumeric(d->plhs[0]) && !mxIsComplex(d->plhs[0]) 
	&& mxGetM(d->plhs[0]) * mxGetN(d->plhs[0]) == 1,
	"user function must return real scalar");
  f = mxGetScalar(d->plhs[0]);
  mxDestroyArray(d->plhs[0]);
  if (gradient) {
     CHECK0(mxIsDouble(d->plhs[1]) && !mxIsComplex(d->plhs[1])
	   && (mxGetM(d->plhs[1]) == 1 || mxGetN(d->plhs[1]) == 1)
	   && mxGetM(d->plhs[1]) * mxGetN(d->plhs[1]) == n,
	   "gradient vector from user function is the wrong size");
     memcpy(gradient, mxGetPr(d->plhs[1]), n * sizeof(double));
     mxDestroyArray(d->plhs[1]);
  }
  d->neval++;
  if (d->verbose) mexPrintf("nlopt_optimize eval #%d: %g\n", d->neval, f);
  if (mxIsNaN(f)) nlopt_force_stop(d->opt);
  return f;
}
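
A note on the wiring: this callback matches NLopt's nlopt_func signature, so it plugs directly into the C API. Below is a minimal driver sketch, not from the original source; run_optimization, the algorithm choice, and the tolerance are illustrative assumptions rather than the wrapper's actual setup.

#include <nlopt.h>

/* Hypothetical driver; user_function_data is the struct used above, and
   setting d->opt is what lets the callback call nlopt_force_stop on NaN. */
static nlopt_result run_optimization(unsigned n, double *x, user_function_data *d)
{
  nlopt_opt opt = nlopt_create(NLOPT_LD_MMA, n);  /* any gradient-based algorithm */
  d->opt = opt;
  nlopt_set_min_objective(opt, user_function, d);
  nlopt_set_xtol_rel(opt, 1e-6);

  double minf;
  nlopt_result ret = nlopt_optimize(opt, x, &minf);
  /* NLOPT_FORCED_STOP means the callback stopped the run, e.g. a NaN fit */
  nlopt_destroy(opt);
  return ret;
}
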
Example #2
static void nloptInequalityFunction(unsigned m, double *result, unsigned n, const double* x, double* grad, void* f_data)
{
	GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
	assert(n == goc->fc->numParam);
	Eigen::Map< Eigen::VectorXd > Epoint((double*)x, n);
	Eigen::Map< Eigen::VectorXd > Eresult(result, m);
	Eigen::Map< Eigen::MatrixXd > jacobian(grad, n, m);
	if (grad && goc->verbose >= 2) {
		// logs the values left in the result buffer by the previous
		// evaluation; ff() below overwrites them for the current call
		if (m == 1) {
			mxLog("major iteration ineq=%.12f", Eresult[0]);
		} else {
			mxPrintMat("major iteration ineq", Eresult);
		}
	}
	inequality_functional ff(*goc);
	ff(Epoint, Eresult);
	if (grad) {
		fd_jacobian(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
			    ff, Eresult, Epoint, jacobian);
		if (!std::isfinite(Eresult.sum())) {
			// infeasible at start of major iteration
			nlopt_opt opt = (nlopt_opt) goc->extraData;
			nlopt_force_stop(opt);
		}
	}
	if (goc->verbose >= 3 && grad) {
		mxPrintMat("inequality jacobian", jacobian);
	}
}
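
NLopt treats a function with this signature (nlopt_mfunc) as one vector-valued constraint of dimension m. A sketch of how it would be attached, assuming the caller owns both the nlopt_opt and the GradientOptimizerContext; the helper name and tolerance are assumptions:

#include <nlopt.h>
#include <vector>

void addInequalities(nlopt_opt opt, GradientOptimizerContext *goc, unsigned m)
{
	std::vector<double> tol(m, 1e-8);  // per-constraint feasibility tolerance
	goc->extraData = opt;              // the callback casts this back to force a stop
	nlopt_add_inequality_mconstraint(opt, m, nloptInequalityFunction, goc, tol.data());
}
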
Example #3
static double nloptObjectiveFunction(unsigned n, const double *x, double *grad, void *f_data)
{
	GradientOptimizerContext *goc = (GradientOptimizerContext *) f_data;
	nlopt_opt opt = (nlopt_opt) goc->extraData;
	FitContext *fc = goc->fc;
	assert(n == fc->numParam);
	int mode = 0;
	double fit = goc->solFun((double*) x, &mode);
	if (grad) {
		fc->iterations += 1;
		if (goc->maxMajorIterations != -1 && fc->iterations >= goc->maxMajorIterations) {
			nlopt_force_stop(opt);
		}
	}
	if (grad && goc->verbose >= 2) {
		mxLog("major iteration fit=%.12f", fit);
	}
	if (mode == -1) {
		if (!goc->feasible) {
			nlopt_force_stop(opt);
		}
		return nan("infeasible");
	}
	if (!grad) return fit;

	Eigen::Map< Eigen::VectorXd > Epoint((double*) x, n);
	Eigen::Map< Eigen::VectorXd > Egrad(grad, n);
	if (fc->wanted & FF_COMPUTE_GRADIENT) {
		Egrad = fc->grad;
	} else if (fc->CI && fc->CI->varIndex >= 0) {
		Egrad.setZero();
		Egrad[fc->CI->varIndex] = fc->lowerBound? 1 : -1;
		fc->grad = Egrad;
	} else {
		if (goc->verbose >= 3) mxLog("fd_gradient start");
		fit_functional ff(*goc);
		gradient_with_ref(goc->gradientAlgo, goc->gradientIterations, goc->gradientStepSize,
				  ff, fit, Epoint, Egrad);
		fc->grad = Egrad;
	}
	if (goc->verbose >= 3) {
		mxPrintMat("gradient", Egrad);
	}
	return fit;
}
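
gradient_with_ref is an OpenMx-internal helper (iterated numerical differentiation). As a rough stand-in, a plain forward-difference gradient over the same kind of functor could look like the sketch below; the step size and the assumption that the functor maps an Eigen vector to a double are mine, not the original code's:

#include <Eigen/Core>

template <typename Functor>
void forwardDiffGradient(Functor &ff, double fx, Eigen::VectorXd point,
			 Eigen::Map<Eigen::VectorXd> &grad, double h = 1e-7)
{
	for (int px = 0; px < point.size(); ++px) {
		double saved = point[px];
		point[px] = saved + h;                // perturb one coordinate
		grad[px] = (ff(point) - fx) / h;      // (f(x + h*e_px) - f(x)) / h
		point[px] = saved;                    // restore
	}
}
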
Example #4
static void nloptEqualityFunction(unsigned m, double* result, unsigned n, const double* x, double* grad, void* f_data)
{
	context &ctx = *(context *)f_data;
	GradientOptimizerContext &goc = ctx.goc;
	assert(n == goc.fc->numParam);
	Eigen::Map< Eigen::VectorXd > Epoint((double*)x, n);
	Eigen::VectorXd Eresult(ctx.origeq);
	Eigen::MatrixXd jacobian(n, ctx.origeq);
	equality_functional ff(goc);
	ff(Epoint, Eresult);
	if (grad) {
		fd_jacobian(goc.gradientAlgo, goc.gradientIterations, goc.gradientStepSize,
			    ff, Eresult, Epoint, jacobian);
		if (ctx.eqmask.size() == 0) {
			ctx.eqmask.assign(m, false);
			for (int c1=0; c1 < int(m-1); ++c1) {
				for (int c2=c1+1; c2 < int(m); ++c2) {
					bool match = (Eresult[c1] == Eresult[c2] &&
						      (jacobian.col(c1) == jacobian.col(c2)));
					if (match && !ctx.eqmask[c2]) {
						ctx.eqmask[c2] = match;
						++ctx.eqredundent;
						if (goc.verbose >= 2) {
							mxLog("nlopt: eq constraint %d is redundent with %d",
							      c1, c2);
						}
					}
				}
			}
			if (ctx.eqredundent) {
				if (goc.verbose >= 1) {
					mxLog("nlopt: detected %d redundent equality constraints; retrying",
					      ctx.eqredundent);
				}
				nlopt_opt opt = (nlopt_opt) goc.extraData;
				nlopt_force_stop(opt);
			}
		}
	}
	Eigen::Map< Eigen::VectorXd > Uresult(result, m);
	Eigen::Map< Eigen::MatrixXd > Ujacobian(grad, n, m);
	int dx=0;
	for (int cx=0; cx < int(m); ++cx) {
		if (cx < int(ctx.eqmask.size()) && ctx.eqmask[cx]) continue; // eqmask is empty until the first gradient call
		Uresult[dx] = Eresult[cx];
		if (grad) {
			Ujacobian.col(dx) = jacobian.col(cx);
		}
		++dx;
	}
	if (goc.verbose >= 4 && grad) {
		mxPrintMat("eq result", Uresult);
		mxPrintMat("eq jacobian", Ujacobian);
	}
}
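
The redundancy detection above deliberately forces a stop so that the caller can retry with fewer equality constraints. A sketch of that attach-and-retry contract; the helper name and tolerance are assumptions, and only the mconstraint registration is the actual NLopt API:

#include <nlopt.h>
#include <vector>

nlopt_result optimizeWithEqualities(nlopt_opt opt, context &ctx, unsigned m,
				    double *x, double *fval)
{
	std::vector<double> tol(m, 1e-8);
	nlopt_add_equality_mconstraint(opt, m, nloptEqualityFunction, &ctx, tol.data());
	nlopt_result ret = nlopt_optimize(opt, x, fval);
	// On NLOPT_FORCED_STOP with ctx.eqredundent > 0, the caller would rebuild
	// opt and register the constraint again with m - ctx.eqredundent rows.
	return ret;
}
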
Example #5
double Callback_Eval_Gradient(unsigned n, const double *x, double *grad, void *my_func_data)
{
	memcpy(Para_List, x, sizeof(double)*n);

	if(grad)	{
		CalGradient(grad);	// gradient will be assigned into objGrad automatically
	}
	else {
		Distribute_Torsion_Parameters();
		Assign_Torsion_Parameters();
		Cal_E_MM_Rotamer();

		CalObjectiveFunction((double*)x, 0);
	}

	Iteration++;
	if(ProgID == 0)	{
		fprintf(fFitting, "Iteration %4d  Chi^2 = %.8lf  Chi^2(1D) = %.8lf  Chi^2(rotamer) = %.8lf\n",
			Iteration, Chi_SQ, Chi_SQ_1D, Chi_SQ_Rotamer);
		fflush(fFitting);
	}

	if(Chi_SQ < Chi_SQ_Min_Global)	{
		Save_Parameters();
		memcpy(Para_Best, Para_List, sizeof(double)*n);
		Chi_SQ_Min_Global = Chi_SQ;
		printf("Iteration %d : ", Iteration);
		for(unsigned i=0; i<n; i++)	{ printf("%f ", Para_Best[i]); }
		printf(" : %f\n", Chi_SQ);
	}

	if(Chi_SQ < Chi_SQ_Min)	{
		// test for progress against the previous minimum *before* updating it;
		// improvements smaller than 2.0E-5 still count as stagnation
		if(Chi_SQ + 2.0E-5 > Chi_SQ_Min)	{
			FailCount++;
		}
		else	{
			FailCount = 0;
		}
		Chi_SQ_Min = Chi_SQ;
	}
	else	{
		FailCount++;
		if(FailCount > 6)	{	// cannot make further progress
			printf("Terminating..\n");
			nlopt_force_stop(opt);	// terminate the optimization
		}
	}

	return Chi_SQ;
}
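
The hand-rolled FailCount test approximates stopping criteria that NLopt provides natively. Where the surrounding code allows it, the same intent can be expressed declaratively; the 2.0E-5 threshold is taken from the code above, while the evaluation cap is an arbitrary assumption:

#include <nlopt.h>

void configureStopping(nlopt_opt opt)
{
	nlopt_set_ftol_abs(opt, 2.0e-5);  // stop once Chi^2 improves by less than this
	nlopt_set_maxeval(opt, 10000);    // hypothetical safety cap on evaluations
}
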
Example #6
double CNLopt::ErrorFunc(unsigned int nParams, const double* params, double* grad, void * misc)
{
	// Get the "this" pointer
	CNLopt * minimizer = reinterpret_cast<CNLopt*>(misc);
	minimizer->mEvals++;
	// See if we have been requested to exit
	if(!minimizer->mRun)
	{
		nlopt_force_stop(minimizer->mOpt);
	}

	// Set the free parameters to the current nominal values determined by the minimizer.
	// Note: grad is never filled in, so this callback suits derivative-free algorithms.
	CModelListPtr model_list = minimizer->mWorkerThread->GetModelList();
	model_list->SetFreeParameters(params, nParams, false);

	minimizer->mWorkerThread->GetChi(&minimizer->mChis[0], minimizer->mChis.size());
	return ComputeChi2r(minimizer->mChis, minimizer->mNParams);
}
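
Here mRun is the cancellation channel: another thread clears it, and the next ErrorFunc invocation calls nlopt_force_stop. A minimal sketch of the sending side, with a hypothetical Stop() member that is not part of the class as shown; in real code mRun should be something like std::atomic<bool> so the cross-thread write is well-defined:

void CNLopt::Stop()
{
	mRun = false;  // ErrorFunc sees this on its next evaluation and forces a stop
}
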
Example #7
double NLfit::calculate(int n, const double* par, double* grad)
{
    assert(n == na_);
    vector<realt> A(par, par+n);
    if (F_->get_verbosity() >= 1)
        output_tried_parameters(A);
    bool stop = common_termination_criteria();
    if (stop)
        nlopt_force_stop(opt_);

    double wssr;
    if (!grad || stop)
        wssr = compute_wssr(A, fitted_datas_);
    else
        wssr = compute_wssr_gradient(A, fitted_datas_, grad);
    if (F_->get_verbosity() >= 1)
        F_->ui()->mesg(iteration_info(wssr));
    return wssr;
}
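
A point common to all of these callbacks: nlopt_force_stop only raises a flag, so the callback still returns a normal value (here wssr), and NLopt reports the stop to its caller afterwards. A sketch of handling that result, with assumed names:

#include <nlopt.h>

bool runFit(nlopt_opt opt, double *a, double *wssr)
{
	nlopt_result r = nlopt_optimize(opt, a, wssr);
	// NLOPT_FORCED_STOP is not necessarily an error: the search reports
	// whatever point and value it had reached when the stop was requested.
	return r > 0 || r == NLOPT_FORCED_STOP;
}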