void Sqpmethod::init() {
  // Call the init method of the base class
  NlpSolverInternal::init();

  // Read options
  max_iter_ = getOption("max_iter");
  max_iter_ls_ = getOption("max_iter_ls");
  c1_ = getOption("c1");
  beta_ = getOption("beta");
  merit_memsize_ = getOption("merit_memory");
  lbfgs_memory_ = getOption("lbfgs_memory");
  tol_pr_ = getOption("tol_pr");
  tol_du_ = getOption("tol_du");
  regularize_ = getOption("regularize");
  exact_hessian_ = getOption("hessian_approximation")=="exact";
  min_step_size_ = getOption("min_step_size");

  // Get/generate required functions
  gradF();
  jacG();
  if (exact_hessian_) {
    hessLag();
  }

  // Allocate a QP solver
  Sparsity H_sparsity = exact_hessian_ ? hessLag().output().sparsity()
      : Sparsity::dense(nx_, nx_);
  // Make sure the diagonal is structurally nonzero (needed for regularization)
  H_sparsity = H_sparsity + Sparsity::diag(nx_);
  Sparsity A_sparsity = jacG().isNull() ? Sparsity(0, nx_)
      : jacG().output().sparsity();

  // QP solver options
  Dict qp_solver_options;
  if (hasSetOption("qp_solver_options")) {
    qp_solver_options = getOption("qp_solver_options");
  }

  // Allocate a QP solver
  qp_solver_ = QpSolver("qp_solver", getOption("qp_solver"),
                        make_map("h", H_sparsity, "a", A_sparsity),
                        qp_solver_options);

  // Lagrange multipliers of the NLP
  mu_.resize(ng_);
  mu_x_.resize(nx_);

  // Lagrange gradient in the next iterate
  gLag_.resize(nx_);
  gLag_old_.resize(nx_);

  // Current linearization point
  x_.resize(nx_);
  x_cand_.resize(nx_);
  x_old_.resize(nx_);

  // Constraint function value
  gk_.resize(ng_);
  gk_cand_.resize(ng_);

  // Hessian approximation
  Bk_ = DMatrix::zeros(H_sparsity);

  // Jacobian
  Jk_ = DMatrix::zeros(A_sparsity);

  // Bounds of the QP
  qp_LBA_.resize(ng_);
  qp_UBA_.resize(ng_);
  qp_LBX_.resize(nx_);
  qp_UBX_.resize(nx_);

  // QP solution
  dx_.resize(nx_);
  qp_DUAL_X_.resize(nx_);
  qp_DUAL_A_.resize(ng_);

  // Gradient of the objective
  gf_.resize(nx_);

  // Create Hessian update function
  if (!exact_hessian_) {
    // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
    SX Bk = SX::sym("Bk", H_sparsity);
    SX x = SX::sym("x", input(NLP_SOLVER_X0).sparsity());
    SX x_old = SX::sym("x_old", x.sparsity());
    SX gLag = SX::sym("gLag", x.sparsity());
    SX gLag_old = SX::sym("gLag_old", x.sparsity());

    // Damped BFGS update (see the note following this function)
    SX sk = x - x_old;
    SX yk = gLag - gLag_old;
    SX qk = mul(Bk, sk);

    // Calculating theta
    SX skBksk = inner_prod(sk, qk);
    SX omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                       0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                       1);
    yk = omega * yk + (1 - omega) * qk;
    SX theta = 1. / inner_prod(sk, yk);
    SX phi = 1. / inner_prod(qk, sk);
    SX Bk_new = Bk + theta * mul(yk, yk.T()) - phi * mul(qk, qk.T());

    // Inputs of the BFGS update function
    vector<SX> bfgs_in(BFGS_NUM_IN);
    bfgs_in[BFGS_BK] = Bk;
    bfgs_in[BFGS_X] = x;
    bfgs_in[BFGS_X_OLD] = x_old;
    bfgs_in[BFGS_GLAG] = gLag;
    bfgs_in[BFGS_GLAG_OLD] = gLag_old;
    bfgs_ = SXFunction("bfgs", bfgs_in, make_vector(Bk_new));

    // Initial Hessian approximation
    B_init_ = DMatrix::eye(nx_);
  }

  // Header
  if (static_cast<bool>(getOption("print_header"))) {
    userOut() << "-------------------------------------------" << endl
              << "This is casadi::SQPMethod." << endl;
    if (exact_hessian_) {
      userOut() << "Using exact Hessian" << endl;
    } else {
      userOut() << "Using limited memory BFGS Hessian approximation" << endl;
    }
    userOut() << endl
              << "Number of variables:                       " << setw(9) << nx_ << endl
              << "Number of constraints:                     " << setw(9) << ng_ << endl
              << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.nnz() << endl
              << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.nnz() << endl
              << endl;
  }
}
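// Note on the update built above: with s = x - x_old, y = gLag - gLag_old and
// q = Bk*s, the symbolic block implements Powell's damped BFGS formula:
//
//   omega = 0.8*(s'q)/(s'q - s'y)   if s'y < 0.2*(s'q),   omega = 1 otherwise
//   y    <- omega*y + (1 - omega)*q
//   Bk+  =  Bk + (y*y')/(s'y) - (q*q')/(s'q)
//
// The damping keeps s'y bounded away from zero (s'y >= 0.2*(s'q) after the
// modification), so the updated approximation stays positive definite even
// when the raw curvature s'y is negative.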
void SQPInternal::init(){
  // Call the init method of the base class
  NLPSolverInternal::init();

  // Read options
  maxiter_ = getOption("maxiter");
  maxiter_ls_ = getOption("maxiter_ls");
  c1_ = getOption("c1");
  beta_ = getOption("beta");
  merit_memsize_ = getOption("merit_memory");
  lbfgs_memory_ = getOption("lbfgs_memory");
  tol_pr_ = getOption("tol_pr");
  tol_du_ = getOption("tol_du");
  regularize_ = getOption("regularize");

  if(getOption("hessian_approximation")=="exact")
    hess_mode_ = HESS_EXACT;
  else if(getOption("hessian_approximation")=="limited-memory")
    hess_mode_ = HESS_BFGS;

  if (hess_mode_== HESS_EXACT && H_.isNull()) {
    if (!getOption("generate_hessian")){
      casadi_error("SQPInternal::init: you set option 'hessian_approximation' to "
                   "'exact', but no hessian was supplied. Try with option "
                   "\"generate_hessian\".");
    }
  }

  // If the Hessian is generated, we use exact approximation by default
  if (bool(getOption("generate_hessian"))){
    setOption("hessian_approximation", "exact");
  }

  // Allocate a QP solver
  CRSSparsity H_sparsity = hess_mode_==HESS_EXACT ? H_.output().sparsity()
      : sp_dense(n_,n_);
  // Make sure the diagonal is structurally nonzero (needed for regularization)
  H_sparsity = H_sparsity + DMatrix::eye(n_).sparsity();
  CRSSparsity A_sparsity = J_.isNull() ? CRSSparsity(0,n_,false)
      : J_.output().sparsity();

  QPSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(H_sparsity,A_sparsity);

  // Set options if provided
  if(hasSetOption("qp_solver_options")){
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }
  qp_solver_.init();

  // Lagrange multipliers of the NLP
  mu_.resize(m_);
  mu_x_.resize(n_);

  // Lagrange gradient in the next iterate
  gLag_.resize(n_);
  gLag_old_.resize(n_);

  // Current linearization point
  x_.resize(n_);
  x_cand_.resize(n_);
  x_old_.resize(n_);

  // Constraint function value
  gk_.resize(m_);
  gk_cand_.resize(m_);

  // Hessian approximation
  Bk_ = DMatrix(H_sparsity);

  // Jacobian
  Jk_ = DMatrix(A_sparsity);

  // Bounds of the QP
  qp_LBA_.resize(m_);
  qp_UBA_.resize(m_);
  qp_LBX_.resize(n_);
  qp_UBX_.resize(n_);

  // QP solution
  dx_.resize(n_);
  qp_DUAL_X_.resize(n_);
  qp_DUAL_A_.resize(m_);

  // Gradient of the objective
  gf_.resize(n_);

  // Create Hessian update function
  if(hess_mode_ == HESS_BFGS){
    // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
    SXMatrix Bk = ssym("Bk",H_sparsity);
    SXMatrix x = ssym("x",input(NLP_X_INIT).sparsity());
    SXMatrix x_old = ssym("x_old",x.sparsity());
    SXMatrix gLag = ssym("gLag",x.sparsity());
    SXMatrix gLag_old = ssym("gLag_old",x.sparsity());

    // Damped BFGS update, as in the newer version above
    SXMatrix sk = x - x_old;
    SXMatrix yk = gLag - gLag_old;
    SXMatrix qk = mul(Bk, sk);

    // Calculating theta
    SXMatrix skBksk = inner_prod(sk, qk);
    SXMatrix omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                             0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                             1);
    yk = omega * yk + (1 - omega) * qk;
    SXMatrix theta = 1. / inner_prod(sk, yk);
    SXMatrix phi = 1. / inner_prod(qk, sk);
    SXMatrix Bk_new = Bk + theta * mul(yk, trans(yk)) - phi * mul(qk, trans(qk));

    // Inputs of the BFGS update function
    vector<SXMatrix> bfgs_in(BFGS_NUM_IN);
    bfgs_in[BFGS_BK] = Bk;
    bfgs_in[BFGS_X] = x;
    bfgs_in[BFGS_X_OLD] = x_old;
    bfgs_in[BFGS_GLAG] = gLag;
    bfgs_in[BFGS_GLAG_OLD] = gLag_old;
    bfgs_ = SXFunction(bfgs_in,Bk_new);
    bfgs_.setOption("number_of_fwd_dir",0);
    bfgs_.setOption("number_of_adj_dir",0);
    bfgs_.init();

    // Initial Hessian approximation
    B_init_ = DMatrix::eye(n_);
  }

  // Header
  if(bool(getOption("print_header"))){
    cout << "-------------------------------------------" << endl;
    cout << "This is CasADi::SQPMethod." << endl;
    switch (hess_mode_) {
      case HESS_EXACT:
        cout << "Using exact Hessian" << endl;
        break;
      case HESS_BFGS:
        cout << "Using limited memory BFGS Hessian approximation" << endl;
        break;
    }
    cout << endl;
    cout << "Number of variables:                       " << setw(9) << n_ << endl;
    cout << "Number of constraints:                     " << setw(9) << m_ << endl;
    cout << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.size() << endl;
    cout << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.size() << endl;
    cout << endl;
  }
}
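// A minimal usage sketch for the newer API shown in the first version above.
// It assumes CasADi 2.4 and a compiled "qpoases" QP plugin; the two-variable
// Rosenbrock problem is purely illustrative, not part of the source.
#include <casadi/casadi.hpp>
#include <iostream>

using namespace casadi;

int main() {
  // Decision variables and an unconstrained toy objective
  SX x = SX::sym("x", 2);
  SX f = sq(1 - x(0)) + 100 * sq(x(1) - sq(x(0)));

  // NLP function in the (x) -> (f) scheme expected by NlpSolver
  SXFunction nlp("nlp", nlpIn("x", x), nlpOut("f", f));

  // Select the SQP method with a BFGS Hessian approximation
  Dict opts;
  opts["qp_solver"] = "qpoases";                     // assumed to be available
  opts["hessian_approximation"] = "limited-memory";  // exercises the bfgs_ path
  NlpSolver solver("solver", "sqpmethod", nlp, opts);

  // Solve from the origin and print the optimal objective value
  solver.setInput(DMatrix::zeros(2), "x0");
  solver.evaluate();
  std::cout << "f*: " << solver.getOutput("f") << std::endl;
  return 0;
}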