Example #1
DirectCollocationInternal::DirectCollocationInternal(const FX& ffcn, const FX& mfcn, const FX& cfcn, const FX& rfcn) : OCPSolverInternal(ffcn, mfcn, cfcn, rfcn){
  addOption("nlp_solver",               	OT_NLPSOLVER,  GenericType(), "An NLPSolver creator function");
  addOption("nlp_solver_options",       	OT_DICTIONARY, GenericType(), "Options to be passed to the NLP Solver");
  addOption("interpolation_order",          OT_INTEGER,  3,  "Order of the interpolating polynomials");
  addOption("collocation_scheme",           OT_STRING,  "radau",  "Collocation scheme","radau|legendre");
  casadi_warning("CasADi::DirectCollocation is still experimental");
}
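The interpolation_order and collocation_scheme options map onto the collocation polynomial in the usual way. As a sketch of the underlying discretization (standard direct-collocation conventions, not taken from the snippet): with $d$ the interpolation order, the state on each control interval is a degree-$d$ Lagrange polynomial through collocation points $0 = \tau_0 < \tau_1 < \dots < \tau_d$,

$$x(\tau) \;\approx\; \sum_{j=0}^{d} \ell_j(\tau)\, x_j, \qquad \ell_j(\tau) \;=\; \prod_{r \ne j} \frac{\tau - \tau_r}{\tau_j - \tau_r},$$

where the $\tau_j$ are Radau or Legendre points according to collocation_scheme; the Radau family places a point at the interval end, $\tau_d = 1$.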
Example #2
  void SymbolicQr::generateBody(std::ostream &stream, const std::string& type,
                                        CodeGenerator& gen) const {
    casadi_warning("Code generation for SymbolicQR still experimental");

    // Data structures to hold A, Q and R
    stream << "  static int prepared = 0;" << endl;
    stream << "  static d A[" << input(LINSOL_A).size() << "];" << endl;
    stream << "  static d Q[" << Q_.size() << "];" << endl;
    stream << "  static d R[" << R_.size() << "];" << endl;

    // Check if the factorization is up-to-date
    stream << "  int i;" << endl;
    stream << "  for (i=0; prepared && i<" << input(LINSOL_A).size()
           << "; ++i) prepared=A[i]!=x0[i];" << endl;

    // Factorize if needed
    int fact_ind = gen.getDependency(fact_fcn_);
    stream << "  if (!prepared) {" << endl;
    stream << "    for (i=0; i<" << input(LINSOL_A).size() << "; ++i) A[i]=x0[i];" << endl;
    stream << "    f" << fact_ind << "(A, Q, R);" << endl;
    stream << "    prepared = 1;" << endl;
    stream << "  }" << endl;

    // Solve
    int solv_ind_N = gen.getDependency(solv_fcn_N_);
    int neq = input(LINSOL_B).size1();
    int nrhs = input(LINSOL_B).size2();
    stream << "  for (i=0; i<" << nrhs << "; ++i) {" << endl;
    stream << "    f" << solv_ind_N << "(Q, R, x1, r0);" << endl;
    stream << "    x1+=" << neq << "; r0+=" << neq << ";" << endl;
    stream << "  }" << endl;
  }
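Splicing the stream statements above together shows the caching scheme of the emitted C body: A and its QR factors live in static arrays, and refactorization happens only when the input matrix changes. For illustration only, assume a dense 3-by-3 A (9 nonzeros), Q and R with 9 and 6 stored entries, a single right-hand side of length 3, and dependency indices 0 (factorize) and 1 (solve); d is taken to be the generator's scalar typedef. The generated body would then read roughly:

  static int prepared = 0;
  static d A[9];
  static d Q[9];
  static d R[6];
  int i;
  for (i=0; prepared && i<9; ++i) prepared=A[i]==x0[i];
  if (!prepared) {
    for (i=0; i<9; ++i) A[i]=x0[i];
    f0(A, Q, R);
    prepared = 1;
  }
  for (i=0; i<1; ++i) {
    f1(Q, R, x1, r0);
    x1+=3; r0+=3;
  }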
Example #3
 SnoptMemory::~SnoptMemory() {
   // Remove from memory pool
   auto mem_it = std::find(mempool.begin(), mempool.end(), this);
   if (mem_it==mempool.end()) {
     casadi_warning("SNOPT memory pool failure");
   } else {
     *mem_it = nullptr;
   }
 }
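For the find() above to succeed, each instance must register itself in the static pool when it is constructed. A minimal sketch of the matching constructor side (assumed here, constructor arguments omitted): reuse a vacated slot if one exists, otherwise grow the pool.

 SnoptMemory::SnoptMemory() {
   // Claim the first vacated slot, or append a new one
   auto mem_it = std::find(mempool.begin(), mempool.end(), nullptr);
   if (mem_it==mempool.end()) {
     mempool.push_back(this);
   } else {
     *mem_it = this;
   }
 }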
Example #4
int main(){
  
  // Warning
  casadi_warning("This function will fail.");
  
  casadi_warning("This function will fail as sure as 1+1 ==" << "2");
  
  // No warning here
  casadi_assert_warning(0==0, "Not here.");
  
  // Warning due to failed assert
  casadi_assert_warning(1==0, "I am telling you, it WILL fail.");
  
  // Recursive error
  casadi_assert(bad_test4());

  return 0;
}
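The second warning above streams into the macro argument; this works because casadi_warning expands its argument inside a stream expression. A hypothetical reimplementation of the mechanism with std::ostringstream (my_warning is an illustrative name, not the CasADi definition):

#include <iostream>
#include <sstream>

// Expanding msg inside `oss << msg` is what lets callers chain <<,
// as in my_warning("1+1 == " << 2)
#define my_warning(msg)                                      \
  do {                                                       \
    std::ostringstream oss;                                  \
    oss << msg;                                              \
    std::cerr << "Warning: " << oss.str() << std::endl;      \
  } while (0)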
Example #5
  ShellCompiler::~ShellCompiler() {
    // Unload
    if (handle_) dlclose(handle_);

    // Delete the temporary file
    std::string rmcmd = "rm " + bin_name_;
    if (system(rmcmd.c_str())) {
      casadi_warning("Failed to delete temporary file:" + bin_name_);
    }
  }
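Shelling out to rm via system() is POSIX-specific and depends on the caller's PATH; the same cleanup can be done portably with std::remove from <cstdio>. A sketch of that variant (not the library's implementation):

  #include <cstdio>

  ShellCompiler::~ShellCompiler() {
    // Unload the shared library before deleting its file
    if (handle_) dlclose(handle_);

    // std::remove returns nonzero on failure
    if (std::remove(bin_name_.c_str()) != 0) {
      casadi_warning("Failed to delete temporary file: " + bin_name_);
    }
  }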
Example #6
void KinsolSolver::setLinearSolver(const LinearSolver& linsol){
  casadi_warning(
    "Depreciated function \"KinsolSolver::setLinearSolver\",\n"
    "use setOption(\"linear solver_creator\",SolverName::creator) in C++ \n"
    "or setOption(\"linear solver_creator\",SolverName) in Python/Octave instead.\n"
    "Options to the linear solver are passed with setOption(\"linear solver_options\",...)\n"
    "This function will be removed in the next release"
  );
  (*this)->setLinearSolver(linsol);
}
Example #7
void NLPSolverInternal::checkInitialBounds() { 
  if(bool(getOption("warn_initial_bounds"))){
    bool violated = false;
    for (int k=0;k<input(NLP_X_INIT).size();++k) {
      if (input(NLP_X_INIT).at(k)>input(NLP_UBX).at(k)) {
        violated = true;
      }
      if (input(NLP_X_INIT).at(k)<input(NLP_LBX).at(k)) {
        violated = true;
      }
    }
    if (violated) casadi_warning("NLPSolver: The initial guess does not satisfy LBX and UBX. Option 'warn_initial_bounds' controls this warning.");
  }
}
Example #8
SQPInternal::SQPInternal(const FX& F, const FX& G, const FX& H, const FX& J) : NLPSolverInternal(F,G,H,J){
  casadi_warning("The SQP method is under development");
  addOption("qp_solver",         OT_QPSOLVER,   GenericType(),    "The QP solver to be used by the SQP method");
  addOption("qp_solver_options", OT_DICTIONARY, GenericType(),    "Options to be passed to the QP solver");
  addOption("hessian_approximation", OT_STRING, "limited-memory", "limited-memory|exact");
  addOption("maxiter",           OT_INTEGER,      50,             "Maximum number of SQP iterations");
  addOption("maxiter_ls",        OT_INTEGER,       3,             "Maximum number of linesearch iterations");
  addOption("tol_pr",            OT_REAL,       1e-6,             "Stopping criterion for primal infeasibility");
  addOption("tol_du",            OT_REAL,       1e-6,             "Stopping criterion for dual infeasability");
  addOption("c1",                OT_REAL,       1E-4,             "Armijo condition, coefficient of decrease in merit");
  addOption("beta",              OT_REAL,       0.8,              "Line-search parameter, restoration factor of stepsize");
  addOption("merit_memory",      OT_INTEGER,      4,              "Size of memory to store history of merit function values");
  addOption("lbfgs_memory",      OT_INTEGER,     10,              "Size of L-BFGS memory.");
  addOption("regularize",        OT_BOOLEAN,  false,              "Automatic regularization of Lagrange Hessian.");
  addOption("print_header",      OT_BOOLEAN,   true,              "Print the header with problem statistics");
  
  // Monitors
  addOption("monitor",      OT_STRINGVECTOR, GenericType(),  "", "eval_f|eval_g|eval_jac_g|eval_grad_f|eval_h|qp|dx", true);
}
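The c1 and beta options registered above parameterize the backtracking Armijo test used in the line search of evaluate(): writing $\phi$ for the L1 merit function and $D\phi$ for its directional derivative along the QP step $dx$, a stepsize $t$ is accepted once

$$\phi(x + t\,dx) \;\le\; \max_{k \in \text{memory}} \phi_k \;+\; t\, c_1\, D\phi,$$

and is otherwise shrunk to $\beta t$, for at most maxiter_ls trials. Taking the maximum over the stored merit values (the merit_memory option) makes the test nonmonotone.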
Example #9
LiftedSQPInternal::LiftedSQPInternal(const Function& F, const Function& G) : NlpSolverInternal(Function(),F,G){
  casadi_warning("casadi::LiftedSQP has been replaced by casadi::SCPgen. This class will be deleted.");
  addOption("qp_solver",         OT_QPSOLVER,   GenericType(), "The QP solver to be used by the SQP method");
  addOption("qp_solver_options", OT_DICTIONARY, GenericType(), "Options to be passed to the QP solver");
  addOption("max_iter",           OT_INTEGER,    100,           "Maximum number of SQP iterations");
  addOption("max_iter_ls",        OT_INTEGER,    100,           "Maximum number of linesearch iterations");
  addOption("toldx",             OT_REAL   ,    1e-12,         "Stopping criterion for the stepsize");
  addOption("tolgl",             OT_REAL   ,    1e-12,         "Stopping criterion for the Lagrangian gradient");
  addOption("sigma",             OT_REAL   ,    1.0,           "Linesearch parameter");
  addOption("rho",               OT_REAL   ,    0.5,           "Linesearch parameter");
  addOption("mu_safety",         OT_REAL   ,    1.1,           "Safety factor for linesearch mu");
  addOption("eta",               OT_REAL   ,    0.0001,        "Linesearch parameter: See Nocedal 3.4");
  addOption("tau",               OT_REAL   ,    0.2,           "Linesearch parameter");
  addOption("hessian_approximation", OT_STRING, "BFGS",        "BFGS|exact");
  addOption("num_lifted",        OT_INTEGER,   0,              "Number of variables to lift");
  
  // Monitors
  addOption("monitor",      OT_STRINGVECTOR, GenericType(),  "", "eval_f|eval_g|eval_jac_g|eval_grad_f|eval_h|qp", true);
  
  // Set default options
  setOption("generate_jacobian", false);
}
Example #10
  void Function::generateCode(const string& filename, bool generate_main) {
    // Detect a C++ ending
    vector<string> cpp_endings;
    cpp_endings.push_back(".cpp");
    cpp_endings.push_back(".cxx");
    cpp_endings.push_back(".cc");
    cpp_endings.push_back(".cp");
    cpp_endings.push_back(".c++");
    for (vector<string>::const_iterator it=cpp_endings.begin(); it!=cpp_endings.end(); ++it) {
      if (filename.size()>it->size() &&
         filename.compare(filename.size()-it->size(), it->size(), *it)==0) {
        casadi_warning("Function::generateCode: Detected C++ file ending "
                       "(generated code is C, not C++)");
      }
    }

    // Create a file
    std::ofstream cfile;
    cfile.open(filename.c_str());
    generateCode(cfile, generate_main);
    cfile.close();
  }
Example #11
  Sqpmethod::Sqpmethod(const Function& nlp) : NlpSolverInternal(nlp) {
    casadi_warning("The SQP method is under development");
    addOption("qp_solver",         OT_STRING,   GenericType(),
              "The QP solver to be used by the SQP method");
    addOption("qp_solver_options", OT_DICT, GenericType(),
              "Options to be passed to the QP solver");
    addOption("hessian_approximation", OT_STRING, "exact",
              "limited-memory|exact");
    addOption("max_iter",           OT_INTEGER,      50,
              "Maximum number of SQP iterations");
    addOption("max_iter_ls",        OT_INTEGER,       3,
              "Maximum number of linesearch iterations");
    addOption("tol_pr",            OT_REAL,       1e-6,
              "Stopping criterion for primal infeasibility");
    addOption("tol_du",            OT_REAL,       1e-6,
              "Stopping criterion for dual infeasability");
    addOption("c1",                OT_REAL,       1E-4,
              "Armijo condition, coefficient of decrease in merit");
    addOption("beta",              OT_REAL,       0.8,
              "Line-search parameter, restoration factor of stepsize");
    addOption("merit_memory",      OT_INTEGER,      4,
              "Size of memory to store history of merit function values");
    addOption("lbfgs_memory",      OT_INTEGER,     10,
              "Size of L-BFGS memory.");
    addOption("regularize",        OT_BOOLEAN,  false,
              "Automatic regularization of Lagrange Hessian.");
    addOption("print_header",      OT_BOOLEAN,   true,
              "Print the header with problem statistics");
    addOption("min_step_size",     OT_REAL,   1e-10,
              "The size (inf-norm) of the step size should not become smaller than this.");

    // Monitors
    addOption("monitor",      OT_STRINGVECTOR, GenericType(),  "",
              "eval_f|eval_g|eval_jac_g|eval_grad_f|eval_h|qp|dx|bfgs", true);
    addOption("print_time",         OT_BOOLEAN,       true,
              "Print information about execution time");
  }
Example #12
void SQPInternal::evaluate(int nfdir, int nadir){
  casadi_assert(nfdir==0 && nadir==0);
  
  checkInitialBounds();
  
  // Get problem data
  const vector<double>& x_init = input(NLP_X_INIT).data();
  const vector<double>& lbx = input(NLP_LBX).data();
  const vector<double>& ubx = input(NLP_UBX).data();
  const vector<double>& lbg = input(NLP_LBG).data();
  const vector<double>& ubg = input(NLP_UBG).data();
  
  // Set the static parameter
  if (parametric_) {
    const vector<double>& p = input(NLP_P).data();
    if (!F_.isNull()) F_.setInput(p,F_.getNumInputs()-1);
    if (!G_.isNull()) G_.setInput(p,G_.getNumInputs()-1);
    if (!H_.isNull()) H_.setInput(p,H_.getNumInputs()-1);
    if (!J_.isNull()) J_.setInput(p,J_.getNumInputs()-1);
  }
    
  // Set linearization point to initial guess
  copy(x_init.begin(),x_init.end(),x_.begin());
  
  // Lagrange multipliers of the NLP
  fill(mu_.begin(),mu_.end(),0);
  fill(mu_x_.begin(),mu_x_.end(),0);

  // Initial constraint Jacobian
  eval_jac_g(x_,gk_,Jk_);
  
  // Initial objective gradient
  eval_grad_f(x_,fk_,gf_);
  
  // Initialize or reset the Hessian or Hessian approximation
  reg_ = 0;
  if( hess_mode_ == HESS_BFGS){
    reset_h();
  } else {
    eval_h(x_,mu_,1.0,Bk_);
  }

  // Evaluate the initial gradient of the Lagrangian
  copy(gf_.begin(),gf_.end(),gLag_.begin());
  if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_);
  // gLag += mu_x_;
  transform(gLag_.begin(),gLag_.end(),mu_x_.begin(),gLag_.begin(),plus<double>());

  // Number of SQP iterations
  int iter = 0;

  // Number of line-search iterations
  int ls_iter = 0;
  
  // Last linesearch successful
  bool ls_success = true;
  
  // Reset
  merit_mem_.clear();
  sigma_ = 0.;    // NOTE: Move this into the main optimization loop

  // Default stepsize
  double t = 0;
  
  // MAIN OPTIMIZATION LOOP
  while(true){
    
    // Primal infeasibility
    double pr_inf = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);
    
    // 1-norm of Lagrange gradient
    double gLag_norm1 = norm_1(gLag_);
    
    // 1-norm of step
    double dx_norm1 = norm_1(dx_);
    
    // Print header occasionally
    if(iter % 10 == 0) printIteration(cout);
    
    // Printing information about the actual iterate
    printIteration(cout,iter,fk_,pr_inf,gLag_norm1,dx_norm1,reg_,ls_iter,ls_success);
    
    // Call callback function if present
    if (!callback_.isNull()) {
      callback_.input(NLP_COST).set(fk_);
      callback_.input(NLP_X_OPT).set(x_);
      callback_.input(NLP_LAMBDA_G).set(mu_);
      callback_.input(NLP_LAMBDA_X).set(mu_x_);
      callback_.input(NLP_G).set(gk_);
      callback_.evaluate();
      
      if (callback_.output(0).at(0)) {
        cout << endl;
        cout << "CasADi::SQPMethod: aborted by callback..." << endl;
        break;
      }
    }
    
    // Checking convergence criteria
    if (pr_inf < tol_pr_ && gLag_norm1 < tol_du_){
      cout << endl;
      cout << "CasADi::SQPMethod: Convergence achieved after " << iter << " iterations." << endl;
      break;
    }
    
    if (iter >= maxiter_){
      cout << endl;
      cout << "CasADi::SQPMethod: Maximum number of iterations reached." << endl;
      break;
    }
    
    // Start a new iteration
    iter++;
    
    // Formulate the QP
    transform(lbx.begin(),lbx.end(),x_.begin(),qp_LBX_.begin(),minus<double>());
    transform(ubx.begin(),ubx.end(),x_.begin(),qp_UBX_.begin(),minus<double>());
    transform(lbg.begin(),lbg.end(),gk_.begin(),qp_LBA_.begin(),minus<double>());
    transform(ubg.begin(),ubg.end(),gk_.begin(),qp_UBA_.begin(),minus<double>());

    // Solve the QP
    solve_QP(Bk_,gf_,qp_LBX_,qp_UBX_,Jk_,qp_LBA_,qp_UBA_,dx_,qp_DUAL_X_,qp_DUAL_A_);
    log("QP solved");

    // Detecting indefiniteness
    double gain = quad_form(dx_,Bk_);
    if (gain < 0){
      casadi_warning("Indefinite Hessian detected...");
    }
        
    // Calculate penalty parameter of merit function
    sigma_ = std::max(sigma_,1.01*norm_inf(qp_DUAL_X_));
    sigma_ = std::max(sigma_,1.01*norm_inf(qp_DUAL_A_));

    // Calculate L1-merit function in the actual iterate
    double l1_infeas = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

    // Right-hand side of Armijo condition
    double F_sens = inner_prod(dx_, gf_);    
    double L1dir = F_sens - sigma_ * l1_infeas;
    double L1merit = fk_ + sigma_ * l1_infeas;

    // Storing the actual merit function value in a list
    merit_mem_.push_back(L1merit);
    if (merit_mem_.size() > merit_memsize_){
      merit_mem_.pop_front();
    }
    // Stepsize
    t = 1.0;
    double fk_cand;
    // Merit function value in candidate
    double L1merit_cand = 0;

    // Reset line-search counter, success marker
    ls_iter = 0;
    ls_success = true;

    // Line-search
    log("Starting line-search");
    if(maxiter_ls_>0){ // maxiter_ls_== 0 disables line-search
      
      // Line-search loop
      while (true){
        for(int i=0; i<n_; ++i) x_cand_[i] = x_[i] + t * dx_[i];
      
        // Evaluating objective and constraints
        eval_f(x_cand_,fk_cand);
        eval_g(x_cand_,gk_cand_);
        ls_iter++;

        // Calculating merit-function in candidate
        l1_infeas = primalInfeasibility(x_cand_, lbx, ubx, gk_cand_, lbg, ubg);
      
        L1merit_cand = fk_cand + sigma_ * l1_infeas;
        // Calculating maximal merit function value so far
        double meritmax = *max_element(merit_mem_.begin(), merit_mem_.end());
        if (L1merit_cand <= meritmax + t * c1_ * L1dir){
          // Accepting candidate
	  log("Line-search completed, candidate accepted");
          break;
        }
      
        // Line-search not successful, but we accept it.
        if(ls_iter == maxiter_ls_){
          ls_success = false;
	  log("Line-search completed, maximum number of iterations");
          break;
        }
      
        // Backtracking
        t = beta_ * t;
      }
    }

    // Candidate accepted, update dual variables
    for(int i=0; i<m_; ++i) mu_[i] = t * qp_DUAL_A_[i] + (1 - t) * mu_[i];
    for(int i=0; i<n_; ++i) mu_x_[i] = t * qp_DUAL_X_[i] + (1 - t) * mu_x_[i];
    
    if( hess_mode_ == HESS_BFGS){
      // Evaluate the gradient of the Lagrangian with the old x but new mu (for BFGS)
      copy(gf_.begin(),gf_.end(),gLag_old_.begin());
      if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_old_);
      // gLag_old += mu_x_;
      transform(gLag_old_.begin(),gLag_old_.end(),mu_x_.begin(),gLag_old_.begin(),plus<double>());
    }
    
    // Candidate accepted, update the primal variable
    copy(x_.begin(),x_.end(),x_old_.begin());
    copy(x_cand_.begin(),x_cand_.end(),x_.begin());

    // Evaluate the constraint Jacobian
    log("Evaluating jac_g");
    eval_jac_g(x_,gk_,Jk_);
    
    // Evaluate the gradient of the objective function
    log("Evaluating grad_f");
    eval_grad_f(x_,fk_,gf_);
    
    // Evaluate the gradient of the Lagrangian with the new x and new mu
    copy(gf_.begin(),gf_.end(),gLag_.begin());
    if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_);
    // gLag += mu_x_;
    transform(gLag_.begin(),gLag_.end(),mu_x_.begin(),gLag_.begin(),plus<double>());

    // Updating Lagrange Hessian
    if( hess_mode_ == HESS_BFGS){
      log("Updating Hessian (BFGS)");
      // BFGS with careful updates and restarts
      if (iter % lbfgs_memory_ == 0){
        // Reset Hessian approximation by dropping all off-diagonal entries
        const vector<int>& rowind = Bk_.rowind();      // Access sparsity (row offset)
        const vector<int>& col = Bk_.col();            // Access sparsity (column)
        vector<double>& data = Bk_.data();             // Access nonzero elements
        for(int i=0; i<rowind.size()-1; ++i){          // Loop over the rows of the Hessian
          for(int el=rowind[i]; el<rowind[i+1]; ++el){ // Loop over the nonzero elements of the row
            if(i!=col[el]) data[el] = 0;               // Remove if off-diagonal entries
          }
        }
      }
      
      // Pass to BFGS update function
      bfgs_.setInput(Bk_,BFGS_BK);
      bfgs_.setInput(x_,BFGS_X);
      bfgs_.setInput(x_old_,BFGS_X_OLD);
      bfgs_.setInput(gLag_,BFGS_GLAG);
      bfgs_.setInput(gLag_old_,BFGS_GLAG_OLD);
      
      // Update the Hessian approximation
      bfgs_.evaluate();
      
      // Get the updated Hessian
      bfgs_.getOutput(Bk_);
    } else {
      // Exact Hessian
      log("Evaluating hessian");
      eval_h(x_,mu_,1.0,Bk_);
    }
  }
  
  // Save results to outputs
  output(NLP_COST).set(fk_);
  output(NLP_X_OPT).set(x_);
  output(NLP_LAMBDA_G).set(mu_);
  output(NLP_LAMBDA_X).set(mu_x_);
  output(NLP_G).set(gk_);
  
  // Save statistics
  stats_["iter_count"] = iter;
}
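In formulas, the merit quantities assembled before the line search above are

$$\phi(x) \;=\; f(x) + \sigma\,\ell_1(x), \qquad D\phi \;=\; \nabla f(x)^\top dx \;-\; \sigma\,\ell_1(x),$$

where $\ell_1(x)$ is the L1 primal infeasibility returned by primalInfeasibility(), and the penalty is kept just above the largest QP multiplier, $\sigma \ge 1.01\,\max\bigl(\|\lambda_x\|_\infty,\, \|\lambda_g\|_\infty\bigr)$.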
Example #13
void ParallelizerInternal::init(){
  // Get mode
  if(getOption("parallelization")=="serial"){
    mode_ = SERIAL;
  } else if(getOption("parallelization")=="openmp") {
    mode_ = OPENMP;
  } else if(getOption("parallelization")=="mpi") {
    mode_ = MPI;
  } else {
    casadi_error("Parallelization mode " << getOption("parallelization") << " unknown.");
  }

  // Switch to serial mode if OPENMP is not supported
  #ifndef WITH_OPENMP
  if(mode_ == OPENMP){
    casadi_warning("OpenMP parallelization is not available, switching to serial mode. Recompile CasADi setting the option WITH_OPENMP to ON.");
    mode_ = SERIAL;
  }
  #endif // WITH_OPENMP
  
  
  // Check if a node is a copy of another
  copy_of_.resize(funcs_.size(),-1);
  map<void*,int> is_copy_of;
  for(int i=0; i<funcs_.size(); ++i){
    // Check if the function has already been assigned an index
    map<void*,int>::const_iterator it=is_copy_of.find(funcs_[i].get());
    if(it!=is_copy_of.end()){
      copy_of_[i] = it->second;
    } else {
      is_copy_of[funcs_[i].get()] = i;
    }
  }
  
  // Initialize the dependent functions
  for(vector<FX>::iterator it=funcs_.begin(); it!=funcs_.end(); ++it){
    // Initialize
    if(!it->isInit())
      it->init();
    
    // Make sure that the functions are unique if we are using OpenMP
    if(mode_==OPENMP && it!=funcs_.begin())
      it->makeUnique();
    
  }
  
  // Clear the indices
  inind_.clear();   inind_.push_back(0);
  outind_.clear();  outind_.push_back(0);
  
  // Add the inputs and outputs
  for(vector<FX>::iterator it=funcs_.begin(); it!=funcs_.end(); ++it){
    inind_.push_back(inind_.back()+it->getNumInputs());
    outind_.push_back(outind_.back()+it->getNumOutputs());
  }
  setNumInputs(inind_.back());
  setNumOutputs(outind_.back());
  
  // Copy inputs and output dimensions and structure
  for(int i=0; i<funcs_.size(); ++i){
    for(int j=inind_[i]; j<inind_[i+1]; ++j)
      input(j) = funcs_[i].input(j-inind_[i]);
    for(int j=outind_[i]; j<outind_[i+1]; ++j)
      output(j) = funcs_[i].output(j-outind_[i]);
  }
  
  // Call the init function of the base class
  FXInternal::init();
  
  // Should corrected input values be saved after evaluation?
  save_corrected_input_ = getOption("save_corrected_input");
  
  // Mark the subsequent call as the "first"
  first_call_ = true;
}
Example #14
  int BonminInterface::solve(void* mem) const {
    auto m = static_cast<BonminMemory*>(mem);

    // Reset statistics
    m->inf_pr.clear();
    m->inf_du.clear();
    m->mu.clear();
    m->d_norm.clear();
    m->regularization_size.clear();
    m->alpha_pr.clear();
    m->alpha_du.clear();
    m->obj.clear();
    m->ls_trials.clear();

    // Reset number of iterations
    m->n_iter = 0;

    // MINLP instance
    SmartPtr<BonminUserClass> tminlp = new BonminUserClass(*this, m);

    BonMinMessageHandler mh;

    // Start a BONMIN application
    BonminSetup bonmin(&mh);

    SmartPtr<OptionsList> options = new OptionsList();
    SmartPtr<Journalist> journalist= new Journalist();
    SmartPtr<Bonmin::RegisteredOptions> roptions = new Bonmin::RegisteredOptions();

    {
      // Direct output through casadi::uout()
      StreamJournal* jrnl_raw = new StreamJournal("console", J_ITERSUMMARY);
      jrnl_raw->SetOutputStream(&casadi::uout());
      jrnl_raw->SetPrintLevel(J_DBG, J_NONE);
      SmartPtr<Journal> jrnl = jrnl_raw;
      journalist->AddJournal(jrnl);
    }

    options->SetJournalist(journalist);
    options->SetRegisteredOptions(roptions);
    bonmin.setOptionsAndJournalist(roptions, options, journalist);
    bonmin.registerOptions();
    // Get all options available in BONMIN
    auto regops = bonmin.roptions()->RegisteredOptionsList();

    // Pass all the options to BONMIN
    for (auto&& op : opts_) {
      // Find the option
      auto regops_it = regops.find(op.first);
      if (regops_it==regops.end()) {
        casadi_error("No such BONMIN option: " + op.first);
      }

      // Get the type
      Ipopt::RegisteredOptionType ipopt_type = regops_it->second->Type();

      // Pass to BONMIN
      bool ret;
      switch (ipopt_type) {
      case Ipopt::OT_Number:
        ret = bonmin.options()->SetNumericValue(op.first, op.second.to_double(), false);
        break;
      case Ipopt::OT_Integer:
        ret = bonmin.options()->SetIntegerValue(op.first, op.second.to_int(), false);
        break;
      case Ipopt::OT_String:
        ret = bonmin.options()->SetStringValue(op.first, op.second.to_string(), false);
        break;
      case Ipopt::OT_Unknown:
      default:
        casadi_warning("Cannot handle option \"" + op.first + "\", ignored");
        continue;
      }
      if (!ret) casadi_error("Invalid options were detected by BONMIN.");
    }

    // Initialize
    bonmin.initialize(GetRawPtr(tminlp));

    if (true) {
      // Branch-and-bound
      try {
        Bab bb;
        bb(bonmin);
      } catch (CoinError& e) {
        casadi_error("CoinError occured: " + to_str(e));
      }
    }

    // Save results to outputs
    casadi_copy(m->gk, ng_, m->g);
    return 0;
  }
Example #15
  void Sqpmethod::evaluate() {
    if (inputs_check_) checkInputs();
    checkInitialBounds();

    if (gather_stats_) {
      Dict iterations;
      iterations["inf_pr"] = std::vector<double>();
      iterations["inf_du"] = std::vector<double>();
      iterations["ls_trials"] = std::vector<double>();
      iterations["d_norm"] = std::vector<double>();
      iterations["obj"] = std::vector<double>();
      stats_["iterations"] = iterations;
    }


    // Get problem data
    const vector<double>& x_init = input(NLP_SOLVER_X0).data();
    const vector<double>& lbx = input(NLP_SOLVER_LBX).data();
    const vector<double>& ubx = input(NLP_SOLVER_UBX).data();
    const vector<double>& lbg = input(NLP_SOLVER_LBG).data();
    const vector<double>& ubg = input(NLP_SOLVER_UBG).data();

    // Set linearization point to initial guess
    copy(x_init.begin(), x_init.end(), x_.begin());

    // Initialize Lagrange multipliers of the NLP
    copy(input(NLP_SOLVER_LAM_G0).begin(), input(NLP_SOLVER_LAM_G0).end(), mu_.begin());
    copy(input(NLP_SOLVER_LAM_X0).begin(), input(NLP_SOLVER_LAM_X0).end(), mu_x_.begin());

    t_eval_f_ = t_eval_grad_f_ = t_eval_g_ = t_eval_jac_g_ = t_eval_h_ =
        t_callback_fun_ = t_callback_prepare_ = t_mainloop_ = 0;

    n_eval_f_ = n_eval_grad_f_ = n_eval_g_ = n_eval_jac_g_ = n_eval_h_ = 0;

    double time1 = clock();

    // Initial constraint Jacobian
    eval_jac_g(x_, gk_, Jk_);

    // Initial objective gradient
    eval_grad_f(x_, fk_, gf_);

    // Initialize or reset the Hessian or Hessian approximation
    reg_ = 0;
    if (exact_hessian_) {
      eval_h(x_, mu_, 1.0, Bk_);
    } else {
      reset_h();
    }

    // Evaluate the initial gradient of the Lagrangian
    copy(gf_.begin(), gf_.end(), gLag_.begin());
    if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_));
    // gLag += mu_x_;
    transform(gLag_.begin(), gLag_.end(), mu_x_.begin(), gLag_.begin(), plus<double>());

    // Number of SQP iterations
    int iter = 0;

    // Number of line-search iterations
    int ls_iter = 0;

    // Last linesearch successful
    bool ls_success = true;

    // Reset
    merit_mem_.clear();
    sigma_ = 0.;    // NOTE: Move this into the main optimization loop

    // Default stepsize
    double t = 0;

    // MAIN OPTIMIZATION LOOP
    while (true) {

      // Primal infeasibility
      double pr_inf = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

      // inf-norm of Lagrange gradient
      double gLag_norminf = norm_inf(gLag_);

      // inf-norm of step
      double dx_norminf = norm_inf(dx_);

      // Print header occasionally
      if (iter % 10 == 0) printIteration(userOut());

      // Printing information about the actual iterate
      printIteration(userOut(), iter, fk_, pr_inf, gLag_norminf, dx_norminf,
                     reg_, ls_iter, ls_success);

      if (gather_stats_) {
        Dict iterations = stats_["iterations"];
        std::vector<double> tmp=iterations["inf_pr"];
        tmp.push_back(pr_inf);
        iterations["inf_pr"] = tmp;

        tmp=iterations["inf_du"];
        tmp.push_back(gLag_norminf);
        iterations["inf_du"] = tmp;

        tmp=iterations["d_norm"];
        tmp.push_back(dx_norminf);
        iterations["d_norm"] = tmp;

        std::vector<int> tmp2=iterations["ls_trials"];
        tmp2.push_back(ls_iter);
        iterations["ls_trials"] = tmp2;

        tmp=iterations["obj"];
        tmp.push_back(fk_);
        iterations["obj"] = tmp;

        stats_["iterations"] = iterations;
      }

      // Call callback function if present
      if (!callback_.isNull()) {
        double time1 = clock();

        if (!output(NLP_SOLVER_F).isempty()) output(NLP_SOLVER_F).set(fk_);
        if (!output(NLP_SOLVER_X).isempty()) output(NLP_SOLVER_X).setNZ(x_);
        if (!output(NLP_SOLVER_LAM_G).isempty()) output(NLP_SOLVER_LAM_G).setNZ(mu_);
        if (!output(NLP_SOLVER_LAM_X).isempty()) output(NLP_SOLVER_LAM_X).setNZ(mu_x_);
        if (!output(NLP_SOLVER_G).isempty()) output(NLP_SOLVER_G).setNZ(gk_);

        Dict iteration;
        iteration["iter"] = iter;
        iteration["inf_pr"] = pr_inf;
        iteration["inf_du"] = gLag_norminf;
        iteration["d_norm"] = dx_norminf;
        iteration["ls_trials"] = ls_iter;
        iteration["obj"] = fk_;
        stats_["iteration"] = iteration;

        double time2 = clock();
        t_callback_prepare_ += (time2-time1)/CLOCKS_PER_SEC;
        time1 = clock();
        int ret = callback_(ref_, user_data_);
        time2 = clock();
        t_callback_fun_ += (time2-time1)/CLOCKS_PER_SEC;
        if (ret) {
          userOut() << endl;
          userOut() << "casadi::SQPMethod: aborted by callback..." << endl;
          stats_["return_status"] = "User_Requested_Stop";
          break;
        }
      }

      // Checking convergence criteria
      if (pr_inf < tol_pr_ && gLag_norminf < tol_du_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Convergence achieved after "
                  << iter << " iterations." << endl;
        stats_["return_status"] = "Solve_Succeeded";
        break;
      }

      if (iter >= max_iter_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Maximum number of iterations reached." << endl;
        stats_["return_status"] = "Maximum_Iterations_Exceeded";
        break;
      }

      if (iter > 0 && dx_norminf <= min_step_size_) {
        userOut() << endl;
        userOut() << "casadi::SQPMethod: Search direction becomes too small without "
            "convergence criteria being met." << endl;
        stats_["return_status"] = "Search_Direction_Becomes_Too_Small";
        break;
      }

      // Start a new iteration
      iter++;

      log("Formulating QP");
      // Formulate the QP
      transform(lbx.begin(), lbx.end(), x_.begin(), qp_LBX_.begin(), minus<double>());
      transform(ubx.begin(), ubx.end(), x_.begin(), qp_UBX_.begin(), minus<double>());
      transform(lbg.begin(), lbg.end(), gk_.begin(), qp_LBA_.begin(), minus<double>());
      transform(ubg.begin(), ubg.end(), gk_.begin(), qp_UBA_.begin(), minus<double>());

      // Solve the QP
      solve_QP(Bk_, gf_, qp_LBX_, qp_UBX_, Jk_, qp_LBA_, qp_UBA_, dx_, qp_DUAL_X_, qp_DUAL_A_);
      log("QP solved");

      // Detecting indefiniteness
      double gain = casadi_quad_form(Bk_.ptr(), Bk_.sparsity(), getPtr(dx_));
      if (gain < 0) {
        casadi_warning("Indefinite Hessian detected...");
      }

      // Calculate penalty parameter of merit function
      sigma_ = std::max(sigma_, 1.01*norm_inf(qp_DUAL_X_));
      sigma_ = std::max(sigma_, 1.01*norm_inf(qp_DUAL_A_));

      // Calculate L1-merit function in the actual iterate
      double l1_infeas = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

      // Right-hand side of Armijo condition
      double F_sens = inner_prod(dx_, gf_);
      double L1dir = F_sens - sigma_ * l1_infeas;
      double L1merit = fk_ + sigma_ * l1_infeas;

      // Storing the actual merit function value in a list
      merit_mem_.push_back(L1merit);
      if (merit_mem_.size() > merit_memsize_) {
        merit_mem_.pop_front();
      }
      // Stepsize
      t = 1.0;
      double fk_cand;
      // Merit function value in candidate
      double L1merit_cand = 0;

      // Reset line-search counter, success marker
      ls_iter = 0;
      ls_success = true;

      // Line-search
      log("Starting line-search");
      if (max_iter_ls_>0) { // max_iter_ls_== 0 disables line-search

        // Line-search loop
        while (true) {
          for (int i=0; i<nx_; ++i) x_cand_[i] = x_[i] + t * dx_[i];

          try {
            // Evaluating objective and constraints
            eval_f(x_cand_, fk_cand);
            eval_g(x_cand_, gk_cand_);
          } catch(const CasadiException& ex) {
            // Silently ignore; line-search failed
            ls_iter++;
            // Backtracking
            t = beta_ * t;
            continue;
          }

          ls_iter++;

          // Calculating merit-function in candidate
          l1_infeas = primalInfeasibility(x_cand_, lbx, ubx, gk_cand_, lbg, ubg);

          L1merit_cand = fk_cand + sigma_ * l1_infeas;
          // Calculating maximal merit function value so far
          double meritmax = *max_element(merit_mem_.begin(), merit_mem_.end());
          if (L1merit_cand <= meritmax + t * c1_ * L1dir) {
            // Accepting candidate
            log("Line-search completed, candidate accepted");
            break;
          }

          // Line-search not successful, but we accept it.
          if (ls_iter == max_iter_ls_) {
            ls_success = false;
            log("Line-search completed, maximum number of iterations");
            break;
          }

          // Backtracking
          t = beta_ * t;
        }

        // Candidate accepted, update dual variables
        for (int i=0; i<ng_; ++i) mu_[i] = t * qp_DUAL_A_[i] + (1 - t) * mu_[i];
        for (int i=0; i<nx_; ++i) mu_x_[i] = t * qp_DUAL_X_[i] + (1 - t) * mu_x_[i];

        // Candidate accepted, update the primal variable
        copy(x_.begin(), x_.end(), x_old_.begin());
        copy(x_cand_.begin(), x_cand_.end(), x_.begin());

      } else {
        // Full step
        copy(qp_DUAL_A_.begin(), qp_DUAL_A_.end(), mu_.begin());
        copy(qp_DUAL_X_.begin(), qp_DUAL_X_.end(), mu_x_.begin());

        copy(x_.begin(), x_.end(), x_old_.begin());
        // x+=dx
        transform(x_.begin(), x_.end(), dx_.begin(), x_.begin(), plus<double>());
      }

      if (!exact_hessian_) {
        // Evaluate the gradient of the Lagrangian with the old x but new mu (for BFGS)
        copy(gf_.begin(), gf_.end(), gLag_old_.begin());
        if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_old_));
        // gLag_old += mu_x_;
        transform(gLag_old_.begin(), gLag_old_.end(), mu_x_.begin(), gLag_old_.begin(),
                  plus<double>());
      }

      // Evaluate the constraint Jacobian
      log("Evaluating jac_g");
      eval_jac_g(x_, gk_, Jk_);

      // Evaluate the gradient of the objective function
      log("Evaluating grad_f");
      eval_grad_f(x_, fk_, gf_);

      // Evaluate the gradient of the Lagrangian with the new x and new mu
      copy(gf_.begin(), gf_.end(), gLag_.begin());
      if (ng_>0) casadi_mv_t(Jk_.ptr(), Jk_.sparsity(), getPtr(mu_), getPtr(gLag_));

      // gLag += mu_x_;
      transform(gLag_.begin(), gLag_.end(), mu_x_.begin(), gLag_.begin(), plus<double>());

      // Updating Lagrange Hessian
      if (!exact_hessian_) {
        log("Updating Hessian (BFGS)");
        // BFGS with careful updates and restarts
        if (iter % lbfgs_memory_ == 0) {
          // Reset Hessian approximation by dropping all off-diagonal entries
          const int* colind = Bk_.colind();      // Access sparsity (column offset)
          int ncol = Bk_.size2();
          const int* row = Bk_.row();            // Access sparsity (row)
          vector<double>& data = Bk_.data();             // Access nonzero elements
          for (int cc=0; cc<ncol; ++cc) {     // Loop over the columns of the Hessian
            for (int el=colind[cc]; el<colind[cc+1]; ++el) {
              // Loop over the nonzero elements of the column
              if (cc!=row[el]) data[el] = 0;               // Remove if off-diagonal entries
            }
          }
        }

        // Pass to BFGS update function
        bfgs_.setInput(Bk_, BFGS_BK);
        bfgs_.setInputNZ(x_, BFGS_X);
        bfgs_.setInputNZ(x_old_, BFGS_X_OLD);
        bfgs_.setInputNZ(gLag_, BFGS_GLAG);
        bfgs_.setInputNZ(gLag_old_, BFGS_GLAG_OLD);

        // Update the Hessian approximation
        bfgs_.evaluate();

        // Get the updated Hessian
        bfgs_.getOutput(Bk_);
        if (monitored("bfgs")) {
          userOut() << "x = " << x_ << endl;
          userOut() << "BFGS = "  << endl;
          Bk_.printSparse();
        }
      } else {
        // Exact Hessian
        log("Evaluating hessian");
        eval_h(x_, mu_, 1.0, Bk_);
      }
    }

    double time2 = clock();
    t_mainloop_ = (time2-time1)/CLOCKS_PER_SEC;

    // Save results to outputs
    output(NLP_SOLVER_F).set(fk_);
    output(NLP_SOLVER_X).setNZ(x_);
    output(NLP_SOLVER_LAM_G).setNZ(mu_);
    output(NLP_SOLVER_LAM_X).setNZ(mu_x_);
    output(NLP_SOLVER_G).setNZ(gk_);

    if (hasOption("print_time") && static_cast<bool>(getOption("print_time"))) {
      // Write timings
      userOut() << "time spent in eval_f: " << t_eval_f_ << " s.";
      if (n_eval_f_>0)
        userOut() << " (" << n_eval_f_ << " calls, " << (t_eval_f_/n_eval_f_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_grad_f: " << t_eval_grad_f_ << " s.";
      if (n_eval_grad_f_>0)
        userOut() << " (" << n_eval_grad_f_ << " calls, "
             << (t_eval_grad_f_/n_eval_grad_f_)*1000 << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_g: " << t_eval_g_ << " s.";
      if (n_eval_g_>0)
        userOut() << " (" << n_eval_g_ << " calls, " << (t_eval_g_/n_eval_g_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_jac_g: " << t_eval_jac_g_ << " s.";
      if (n_eval_jac_g_>0)
        userOut() << " (" << n_eval_jac_g_ << " calls, "
             << (t_eval_jac_g_/n_eval_jac_g_)*1000 << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in eval_h: " << t_eval_h_ << " s.";
      if (n_eval_h_>1)
        userOut() << " (" << n_eval_h_ << " calls, " << (t_eval_h_/n_eval_h_)*1000
                  << " ms. average)";
      userOut() << endl;
      userOut() << "time spent in main loop: " << t_mainloop_ << " s." << endl;
      userOut() << "time spent in callback function: " << t_callback_fun_ << " s." << endl;
      userOut() << "time spent in callback preparation: " << t_callback_prepare_ << " s." << endl;
    }

    // Save statistics
    stats_["iter_count"] = iter;

    stats_["t_eval_f"] = t_eval_f_;
    stats_["t_eval_grad_f"] = t_eval_grad_f_;
    stats_["t_eval_g"] = t_eval_g_;
    stats_["t_eval_jac_g"] = t_eval_jac_g_;
    stats_["t_eval_h"] = t_eval_h_;
    stats_["t_mainloop"] = t_mainloop_;
    stats_["t_callback_fun"] = t_callback_fun_;
    stats_["t_callback_prepare"] = t_callback_prepare_;
    stats_["n_eval_f"] = n_eval_f_;
    stats_["n_eval_grad_f"] = n_eval_grad_f_;
    stats_["n_eval_g"] = n_eval_g_;
    stats_["n_eval_jac_g"] = n_eval_jac_g_;
    stats_["n_eval_h"] = n_eval_h_;
  }
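With $s = x - x_{\text{old}}$ and $y = \nabla_x\mathcal{L} - \nabla_x\mathcal{L}_{\text{old}}$, the inputs passed to bfgs_ above are exactly the ingredients of the standard BFGS update,

$$B^{+} \;=\; B \;-\; \frac{B s s^\top B}{s^\top B s} \;+\; \frac{y y^\top}{s^\top y},$$

which bfgs_ presumably applies in a damped or safeguarded form; the periodic reset of B to its diagonal guards the approximation against drifting when $s^\top y$ becomes small.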
Example #16
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");
  
  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");
  
  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2, "Wrong number of input arguments to F for parametric NLP. Must be 2, but got " << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1, "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got " << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2, "Wrong number of input arguments to F. Must be 1 or 2");
  
  if (F_.getNumInputs()==2) parametric_=true;
  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
     "To avoid confusion, the input argument to F must be vector. You supplied " << F_.input().dimString() << endl <<
     " We suggest you make the following changes:" << endl <<
     "   -  F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     "   -  F is an MXFunction:  MXFunction([X],[rhs]) -> " <<  endl <<
     "                                     X_vec = MX(\"X\",vec(X.sparsity())) " << endl <<
     "                                     F_vec = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]]) " << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl
  );
  
  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_  || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(), "Input argument of F must be dense. You supplied " << F_.input().dimString());
  
  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2, "Wrong number of input arguments to G for parametric NLP. Must be 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1, "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(), "F and G input dimension must match. F " << F_.input().dimString() << ". G " << G_.input().dimString());
  }
  
  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");
    
    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }
  
  
  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");
    
    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }
  
  // Find out if we are to generate the Hessian of the Lagrangian
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported");
    log("generating hessian");
    
    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();
      
      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();
      
      // Lagrange multipliers
      MX lam("lam",0);
      
      // Objective function scaling
      MX sigma("sigma");
      
      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);
      
      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");
      
    } else { // G_.isNull()
      
      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);
      
      // Efficient if both functions are SXFunction
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expression for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();
        
        // Numeric hessian
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");
        
        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");
        
        // Substitute symbolic variables in f if different input variables from g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }
        
        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");        
        
        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4: 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);
        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();
        
        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");
        
      } else { // !F_sx.isNull() && !G_sx.isNull()
        // Check if the functions are SXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);
        
        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");
          
          // Expression for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();
          
          // Lagrange multipliers
          MX lam("lam",g.size1());
      
          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4:3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in,sigma*f+ inner_prod(lam,g));
          lfcn.init();
	  log("SX Lagrangian function generated");
          
/*          cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl;*/
      
          bool adjoint_mode = true;
          if(adjoint_mode){
          
            // Gradient of the lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");

            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
//           cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");
            
            // Uni-directional coloring (note, the hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");
            
            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {

            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
            
          }
          log("MX Lagrangian Hessian function generated");
          
        } else {
          casadi_assert_message(0, "Automatic calculation of exact Hessian currently only for F and G both SXFunction or MXFunction ");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()
  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if it does not already exist
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();
    
    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }
    
  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  
  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2, "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got " << H_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1, "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got " << H_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2, "Wrong number of input arguments to J for parametric NLP. Must be 2, but got " << J_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1, "Wrong number of input arguments to J for non-parametric NLP. Must be 1, but got " << J_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();
    
    if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while G has got " << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while H has got " << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while J has got " << J_->input(J_->getNumInputs()-1).dimString());
  }
  
  // Infinity
  double inf = numeric_limits<double>::infinity();
  
  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);
  
  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)      = DMatrix(n_,1,0);
  output(NLP_COST)       = DMatrix(1,1,0);
  output(NLP_LAMBDA_X)   = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G)   = DMatrix(m_,1,0);
  output(NLP_G)          = DMatrix(m_,1,0);
  
  if (hasSetOption("iteration_callback")) {
   callback_ = getOption("iteration_callback");
   if (!callback_.isNull()) {
     if (!callback_.isInit()) callback_.init();
     casadi_assert_message(callback_.getNumOutputs()==1, "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.output(0).size()==1, "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT, "Callback function should have the output scheme of NLPSolver as input scheme. i.e. " <<NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided." );
     for (int i=0;i<NLP_NUM_OUT;i++) {
       casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
         "Callback function should have the output scheme of NLPSolver as input scheme. " << 
         "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) <<  " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be " << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << "."
       );
       callback_.input(i).setAll(0);
     }
   }
  }
  
  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
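Both the SX and the MX branch above build the same scaled Lagrangian before differentiating it twice:

$$\mathcal{L}(x, \lambda, \sigma) \;=\; \sigma\, f(x) \;+\; \lambda^\top g(x), \qquad H \;=\; \nabla_x^2\, \mathcal{L}(x, \lambda, \sigma),$$

which is why the generated H_ takes $(x, \lambda, \sigma)$, plus the parameter in the parametric case, as its inputs.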
Example #17
  int OoqpInterface::
  eval(const double** arg, double** res, casadi_int* iw, double* w, void* mem) const {

    return_status_ = -1;
    success_ = false;
    if (inputs_check_) {
      check_inputs(arg[CONIC_LBX], arg[CONIC_UBX], arg[CONIC_LBA], arg[CONIC_UBA]);
    }

    // Get problem data
    double* g=w; w += nx_;
    casadi_copy(arg[CONIC_G], nx_, g);
    double* lbx=w; w += nx_;
    casadi_copy(arg[CONIC_LBX], nx_, lbx);
    double* ubx=w; w += nx_;
    casadi_copy(arg[CONIC_UBX], nx_, ubx);
    double* lba=w; w += na_;
    casadi_copy(arg[CONIC_LBA], na_, lba);
    double* uba=w; w += na_;
    casadi_copy(arg[CONIC_UBA], na_, uba);
    double* H=w; w += nnz_in(CONIC_H);
    casadi_copy(arg[CONIC_H], nnz_in(CONIC_H), H);
    double* A=w; w += nnz_in(CONIC_A);
    casadi_copy(arg[CONIC_A], nnz_in(CONIC_A), A);

    // Temporary memory
    double* c_ = w; w += nx_;
    double* bA_ = w; w += na_;
    double* xlow_ = w; w += nx_;
    double* xupp_ = w; w += nx_;
    double* clow_ = w; w += na_;
    double* cupp_ = w; w += na_;
    double* x_ = w; w += nx_;
    double* gamma_ = w; w += nx_;
    double* phi_ = w; w += nx_;
    double* y_ = w; w += na_;
    double* z_ = w; w += na_;
    double* lambda_ = w; w += na_;
    double* pi_ = w; w += na_;
    char* ixlow_ = reinterpret_cast<char*>(iw); iw += nx_;
    char* ixupp_ = reinterpret_cast<char*>(iw); iw += nx_;
    char* iclow_ = reinterpret_cast<char*>(iw); iw += na_;
    char* icupp_ = reinterpret_cast<char*>(iw); iw += na_;
    double* dQ_ = w; w += nQ_;
    double* dA_ = w; w += nA_;
    double* dC_ = w; w += nA_;
    int* irowQ_ = reinterpret_cast<int*>(iw); iw += nQ_;
    int* jcolQ_ = reinterpret_cast<int*>(iw); iw += nQ_;
    int* irowA_ = reinterpret_cast<int*>(iw); iw += nA_;
    int* jcolA_ = reinterpret_cast<int*>(iw); iw += nA_;
    int* irowC_ = reinterpret_cast<int*>(iw); iw += nA_;
    int* jcolC_ = reinterpret_cast<int*>(iw); iw += nA_;
    int* x_index_ = reinterpret_cast<int*>(iw); iw += nx_;
    int* c_index_ = reinterpret_cast<int*>(iw); iw += na_;
    double* p_ = w; w += nx_;
    double* AT = w; w += nA_;

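    // Note: every buffer above is carved out of the caller-provided work
    // arrays w (real) and iw (integer); nQ_ and nA_ presumably hold the
    // nonzero counts of the Hessian and constraint matrix, reserved at init.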
    // Parameter contribution to the objective
    double objParam = 0;

    // Get the number of free variables and their types
    casadi_int nx = 0, np=0;
    for (casadi_int i=0; i<nx_; ++i) {
      if (lbx[i]==ubx[i]) {
        // Save parameter
        p_[np] = lbx[i];

        // Add contribution to objective
        objParam += g[i]*p_[np];

        // Save index
        x_index_[i] = -1-np++;

      } else {
        // True free variable
        if (lbx[i]==-numeric_limits<double>::infinity()) {
          xlow_[nx] = 0;
          ixlow_[nx] = 0;
        } else {
          xlow_[nx] = lbx[i];
          ixlow_[nx] = 1;
        }
        if (ubx[i]==numeric_limits<double>::infinity()) {
          xupp_[nx] = 0;
          ixupp_[nx] = 0;
        } else {
          xupp_[nx] = ubx[i];
          ixupp_[nx] = 1;
        }
        c_[nx] = g[i];
        x_index_[i] = nx++;
      }
    }
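    // At this point x_index_ encodes each original variable as either its
    // position among the nx free variables (>= 0) or as -1-k for the k-th
    // eliminated parameter; e.g. if only variable 2 has lbx==ubx, then
    // x_index_ = {0, 1, -1, 2, ...}.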

    // Get quadratic term
    const casadi_int* H_colind = H_.colind();
    const casadi_int* H_row = H_.row();
    casadi_int nnzQ = 0;
    // Loop over the columns of the quadratic term
    for (casadi_int cc=0; cc<nx_; ++cc) {

      // Loop over nonzero elements of the column
      for (casadi_int el=H_colind[cc]; el<H_colind[cc+1]; ++el) {

        // Only upper triangular part
        casadi_int rr=H_row[el];
        if (rr>cc) break;

        // Get variable types
        casadi_int icc=x_index_[cc];
        casadi_int irr=x_index_[rr];

        if (icc<0) {
          if (irr<0) {
            // Add contribution to objective
            objParam += icc==irr ? H[el]*sq(p_[-1-icc])/2 : H[el]*p_[-1-irr]*p_[-1-icc];
          } else {
            // Add contribution to gradient term
            c_[irr] += H[el]*p_[-1-icc];
          }
        } else {
          if (irr<0) {
            // Add contribution to gradient term
            c_[icc] += H[el]*p_[-1-irr];
          } else {
            // Add to sparsity pattern
            irowQ_[nnzQ] = icc; // row-major --> indices swapped
            jcolQ_[nnzQ] = irr; // row-major --> indices swapped
            dQ_[nnzQ++] = H[el];
          }
        }
      }
    }
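    // The four cases above split H over free/fixed variables: both fixed
    // yields a constant objective term, one fixed yields a linear correction
    // to the gradient c_, and both free yields an entry of the reduced Q.
    // Since only the upper triangle of H is visited and the indices are
    // swapped, Q is effectively handed to OOQP as its lower triangle.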

    // Get the transpose of the sparsity pattern to be able to loop over the constraints
    casadi_trans(A, A_, AT, spAT_, iw);

    // Loop over constraints
    const casadi_int* A_colind = A_.colind();
    const casadi_int* A_row = A_.row();
    const casadi_int* AT_colind = spAT_.colind();
    const casadi_int* AT_row = spAT_.row();
    casadi_int nA=0, nC=0, /*mz=0, */ nnzA=0, nnzC=0;
    for (casadi_int j=0; j<na_; ++j) {
      if (lba[j] == -numeric_limits<double>::infinity() &&
          uba[j] ==  numeric_limits<double>::infinity()) {
        // Redundant constraint
        c_index_[j] = 0;
      } else if (lba[j]==uba[j]) {
        // Equality constraint
        bA_[nA] = lba[j];

        // Add to A
        for (casadi_int el=AT_colind[j]; el<AT_colind[j+1]; ++el) {
          casadi_int i=AT_row[el];
          if (x_index_[i]<0) {
            // Parameter
            bA_[nA] -= AT[el]*p_[-x_index_[i]-1];
          } else {
            // Free variable
            irowA_[nnzA] = nA;
            jcolA_[nnzA] = x_index_[i];
            dA_[nnzA++] = AT[el];
          }
        }
        c_index_[j] = -1-nA++;
      } else {
        // Inequality constraint
        if (lba[j]==-numeric_limits<double>::infinity()) {
          clow_[nC] = 0;
          iclow_[nC] = 0;
        } else {
          clow_[nC] = lba[j];
          iclow_[nC] = 1;
        }
        if (uba[j]==numeric_limits<double>::infinity()) {
          cupp_[nC] = 0;
          icupp_[nC] = 0;
        } else {
          cupp_[nC] = uba[j];
          icupp_[nC] = 1;
        }

        // Add to C
        for (casadi_int el=AT_colind[j]; el<AT_colind[j+1]; ++el) {
          casadi_int i=AT_row[el];
          if (x_index_[i]<0) {
            // Parameter
            if (iclow_[nC]==1) clow_[nC] -= AT[el]*p_[-x_index_[i]-1];
            if (icupp_[nC]==1) cupp_[nC] -= AT[el]*p_[-x_index_[i]-1];
          } else {
            // Free variable
            irowC_[nnzC] = nC;
            jcolC_[nnzC] = x_index_[i];
            dC_[nnzC++] = AT[el];
          }
        }
        c_index_[j] = 1+nC++;
      }
    }
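    // c_index_ now classifies each row of A: 0 for a redundant row (both
    // bounds infinite), -1-k for the k-th equality row, moved into (A, bA),
    // and 1+k for the k-th inequality row, moved into (C, clow, cupp) --
    // matching OOQP's split formulation
    //   min 1/2 x'Qx + c'x  s.t.  Ax = bA,  clow <= Cx <= cupp.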

    // Reset the solution
    casadi_fill(x_, nx_, 0.);
    casadi_fill(gamma_, nx_, 0.);
    casadi_fill(phi_, nx_, 0.);
    casadi_fill(y_, na_, 0.);
    casadi_fill(z_, na_, 0.);
    casadi_fill(lambda_, na_, 0.);
    casadi_fill(pi_, na_, 0.);

    // Solve the QP
    double objectiveValue;

    int ierr;
    if (false) { // Use C interface
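      // This branch is intentionally dead code: the condition is hardcoded
      // to false, so the call below documents the C interface without ever
      // running.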
      // TODO(jgillis): Change to conicvehb, see OOQP users guide
      qpsolvesp(c_, nx,
                irowQ_,  nnzQ, jcolQ_, dQ_,
                xlow_, ixlow_,
                xupp_, ixupp_,
                irowA_, nnzA, jcolA_, dA_,
                bA_, nA,
                irowC_, nnzC, jcolC_, dC_,
                clow_, nC, iclow_,
                cupp_, icupp_,
                x_, gamma_, phi_,
                y_,
                z_, lambda_, pi_,
                &objectiveValue,
                print_level_, &ierr);
    } else { // Use C++ interface
      ierr=0;
      // All OOQP related allocations in evaluate

      std::vector<int> krowQ(nx+1);
      std::vector<int> krowA(nA+1);
      std::vector<int> krowC(nC+1);

      //casadi_int status_code = 0;
      makehb(irowQ_, nnzQ, get_ptr(krowQ), nx, &ierr);
      if (ierr == 0) makehb(irowA_, nnzA, get_ptr(krowA), nA, &ierr);
      if (ierr == 0) makehb(irowC_, nnzC, get_ptr(krowC), nC, &ierr);

      if (ierr == 0) {
        QpGenContext ctx;

        QpGenHbGondzioSetup(c_, nx, get_ptr(krowQ), jcolQ_, dQ_,
                            xlow_, ixlow_, xupp_, ixupp_,
                            get_ptr(krowA), nA, jcolA_, dA_, bA_,
                            get_ptr(krowC), nC, jcolC_, dC_,
                            clow_, iclow_, cupp_, icupp_, &ctx,
                            &ierr);
        if (ierr == 0) {
          Solver* solver = static_cast<Solver *>(ctx.solver);
          gOoqpPrintLevel = print_level_;
          solver->monitorSelf();
          solver->setMuTol(mutol_);
          solver->setArTol(artol_);

          QpGenFinish(&ctx, x_, gamma_, phi_,
                      y_, z_, lambda_, pi_,
                      &objectiveValue, &ierr);
        }

        QpGenCleanup(&ctx);
      }
    }
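    // In the C++ path, QpGenHbGondzioSetup expects Harwell-Boeing (compressed
    // row) storage -- hence the makehb conversions of the coordinate row
    // indices above -- and QpGenFinish runs the Gondzio predictor-corrector
    // solver and extracts the primal/dual solution.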

    return_status_ = ierr;
    success_ = ierr==SUCCESSFUL_TERMINATION;
    if (ierr>0) {
      casadi_warning("Unable to solve problem: " + str(errFlag(ierr)));
    } else if (ierr<0) {
      casadi_error("Fatal error: " + str(errFlag(ierr)));
    }

    // Retrieve eliminated decision variables
    for (casadi_int i=nx_-1; i>=0; --i) {
      casadi_int ii = x_index_[i];
      if (ii<0) {
        x_[i] = p_[-1-ii];
      } else {
        x_[i] = x_[ii];
      }
    }

    // Retrieve eliminated dual variables (linear bounds)
    for (casadi_int j=na_-1; j>=0; --j) {
      casadi_int jj = c_index_[j];
      if (jj==0) {
        lambda_[j] = 0;
      } else if (jj<0) {
        lambda_[j] = -y_[-1-jj];
      } else {
        lambda_[j] = pi_[-1+jj]-lambda_[-1+jj];
      }
    }
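    // Sign bookkeeping: OOQP returns separate multipliers y (equalities) and
    // lambda/pi (lower/upper inequality bounds); the negation of y and the
    // difference pi - lambda presumably fold these into CasADi's single
    // lam_a convention.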

    // Retrieve eliminated dual variables (simple bounds)
    for (casadi_int i=nx_-1; i>=0; --i) {
      casadi_int ii = x_index_[i];
      if (ii<0) {
        // The dual solution for the fixed parameters follows from the KKT conditions
        gamma_[i] = -g[i];
        for (casadi_int el=H_colind[i]; el<H_colind[i+1]; ++el) {
          casadi_int j=H_row[el];
          gamma_[i] -= H[el]*x_[j];
        }
        for (casadi_int el=A_colind[i]; el<A_colind[i+1]; ++el) {
          casadi_int j=A_row[el];
          gamma_[i] -= A[el]*lambda_[j];
        }
      } else {
        gamma_[i] = phi_[ii]-gamma_[ii];
      }
    }
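    // For a fixed variable i the multiplier is recovered from stationarity
    // of the Lagrangian: gamma_i = -(g_i + [H x]_i + [A' lambda]_i), using
    // the full symmetric H and the i-th column of A.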

    // Save optimal cost
    if (res[CONIC_COST]) *res[CONIC_COST] = objectiveValue + objParam;

    // Save primal solution
    casadi_copy(x_, nx_, res[CONIC_X]);

    // Save dual solution (linear bounds)
    casadi_copy(lambda_, na_, res[CONIC_LAM_A]);

    // Save dual solution (simple bounds)
    casadi_copy(gamma_, nx_, res[CONIC_LAM_X]);
    return 0;
  }
Example #18
0
  void WorhpInterface::init(const Dict& opts) {

    // Call the init method of the base class
    Nlpsol::init(opts);

    if (CheckWorhpVersion(WORHP_MAJOR, WORHP_MINOR, WORHP_PATCH)) {
      casadi_warning("Worhp incompatibility. Interface was compiled for Worhp " +
        str(WORHP_MAJOR) + "." + str(WORHP_MINOR) + "." + std::string(WORHP_PATCH));
    }

    // Default options
    Dict worhp_opts;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="worhp") {
        worhp_opts = op.second;
      }
    }

    // Sort Worhp options
    casadi_int nopts = WorhpGetParamCount();
    for (auto&& op : worhp_opts) {
      if (op.first.compare("qp")==0) {
        qp_opts_ = op.second;
        continue;
      }

      // Get corresponding index using a linear search
      casadi_int ind;
      for (ind=1; ind<=nopts; ++ind) {
        // Get name in WORHP
        const char* name = WorhpGetParamName(ind);
        // Break if matching name
        if (op.first.compare(name)==0) break;
      }
      if (ind>nopts) casadi_error("No such Worhp option: " + op.first);

      // Add to the corresponding list
      switch (WorhpGetParamType(ind)) {
      case WORHP_BOOL_T:
        bool_opts_[op.first] = op.second;
        break;
      case WORHP_DOUBLE_T:
        double_opts_[op.first] = op.second;
        break;
      case WORHP_INT_T:
        int_opts_[op.first] = op.second;
        break;
      default:
        casadi_error("Cannot handle WORHP option \"" + op.first + "\": Unknown type " +
          str(WorhpGetParamType(ind)) + ".");
        break;
      }
    }
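    // For illustration (hypothetical parameter names -- consult the WORHP
    // manual for the real ones): a caller would pass solver parameters
    // through the "worhp" sub-dictionary, e.g.
    //   opts["worhp"] = Dict{{"MaxIter", 200}, {"TolFeas", 1e-8}};
    // and the dispatch above files each entry under bool_opts_, int_opts_
    // or double_opts_ according to its WORHP type.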

    // Setup NLP functions
    f_fcn_ = create_function("nlp_f", {"x", "p"}, {"f"});
    g_fcn_ = create_function("nlp_g", {"x", "p"}, {"g"});
    grad_f_fcn_ = create_function("nlp_grad_f", {"x", "p"}, {"f", "grad:f:x"});
    jac_g_fcn_ = create_function("nlp_jac_g", {"x", "p"}, {"g", "jac:g:x"});
    jacg_sp_ = jac_g_fcn_.sparsity_out(1);
    hess_l_fcn_ = create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                                  {"transpose:hess:gamma:x:x"},
                                  {{"gamma", {"f", "g"}}});
    hesslag_sp_ = hess_l_fcn_.sparsity_out(0);
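    // The Hessian of the Lagrangian is generated for gamma = lam_f*f + lam_g'*g
    // and requested in transposed form, presumably so its nonzeros line up
    // with WORHP's triangular storage; its sparsity is kept in hesslag_sp_.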

    // Temporary vectors
    alloc_w(nx_); // for fetching diagonal entries from the Hessian
  }