Example #1
  void Newton::init(const Dict& opts) {

    // Call the base class initializer
    Rootfinder::init(opts);

    // Default options
    max_iter_ = 1000;
    abstol_ = 1e-12;
    abstolStep_ = 1e-12;
    print_iteration_ = false;

    // Read options
    for (auto&& op : opts) {
      if (op.first=="max_iter") {
        max_iter_ = op.second;
      } else if (op.first=="abstol") {
        abstol_ = op.second;
      } else if (op.first=="abstolStep") {
        abstolStep_ = op.second;
      } else if (op.first=="print_iteration") {
        print_iteration_ = op.second;
      }
    }

    casadi_assert_message(oracle_.n_in()>0,
                          "Newton: the supplied f must have at least one input.");
    casadi_assert_message(!linsol_.is_null(),
                          "Newton::init: linear_solver must be supplied");

    // Allocate memory
    alloc_w(n_, true); // x
    alloc_w(n_, true); // F
    alloc_w(sp_jac_.nnz(), true); // J
  }
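
A usage sketch for the options parsed above, assuming the CasADi 3.x rootfinder factory and an already-defined residual function rfp (both assumptions, not part of the snippet):

  // Sketch: options travel as a Dict and are forwarded to Newton::init
  Dict opts;
  opts["max_iter"] = 50;            // overrides the default of 1000
  opts["abstol"] = 1e-10;           // tolerance on the residual
  opts["print_iteration"] = true;   // per-iteration printout
  Function solver = rootfinder("solver", "newton", rfp, opts);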
Example #2
File: switch.cpp Project: casadi/casadi
  void Switch::init(const Dict& opts) {
    // Call the initialization method of the base class
    FunctionInternal::init(opts);

    // Buffer for mismatching sparsities
    size_t sz_buf=0;

    // Keep track of sparsity projections
    project_in_ = project_out_ = false;

    // Get required work
    for (casadi_int k=0; k<=f_.size(); ++k) {
      const Function& fk = k<f_.size() ? f_[k] : f_def_;
      if (fk.is_null()) continue;

      // Memory for evaluation
      alloc(fk);

      // Required work vectors
      size_t sz_buf_k=0;

      // Add size for input buffers
      for (casadi_int i=1; i<n_in_; ++i) {
        const Sparsity& s = fk.sparsity_in(i-1);
        if (s!=sparsity_in_[i]) {
          project_in_ = true;
          alloc_w(s.size1()); // for casadi_project
          sz_buf_k += s.nnz();
        }
      }

      // Add size for output buffers
      for (casadi_int i=0; i<n_out_; ++i) {
        const Sparsity& s = fk.sparsity_out(i);
        if (s!=sparsity_out_[i]) {
          project_out_ = true;
          alloc_w(s.size1()); // for casadi_project
          sz_buf_k += s.nnz();
        }
      }

      // Only need the largest of these work vectors
      sz_buf = max(sz_buf, sz_buf_k);
    }

    // Memory for the work vectors
    alloc_w(sz_buf, true);
  }
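
The sizing rule is worth spelling out: only one case runs per call, so the projection buffers of different cases can share storage, and sz_buf is a maximum rather than a sum. A standalone sketch of that rule (CasADi types stripped; a sketch, not the library code):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  // mismatch_nnz[k] lists the nonzero counts of case k's mismatching buffers
  std::size_t scratch_size(const std::vector<std::vector<std::size_t>>& mismatch_nnz) {
    std::size_t sz_buf = 0;
    for (const auto& c : mismatch_nnz) {
      std::size_t sz_buf_k = 0;
      for (std::size_t nnz : c) sz_buf_k += nnz;  // needed simultaneously within one call
      sz_buf = std::max(sz_buf, sz_buf_k);        // cases never run concurrently
    }
    return sz_buf;
  }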
Example #3
  void Map::init(const Dict& opts) {
    // Call the initialization method of the base class
    FunctionInternal::init(opts);

    // Allocate sufficient memory for serial evaluation
    alloc_arg(f_.sz_arg());
    alloc_res(f_.sz_res());
    alloc_w(f_.sz_w());
    alloc_iw(f_.sz_iw());
  }
Example #4
  void MapOmp::init(const Dict& opts) {
    // Call the initialization method of the base class
    Map::init(opts);

    // Allocate memory for holding memory object references
    alloc_iw(n_, true);

    // Allocate sufficient memory for parallel evaluation
    alloc_arg(f_.sz_arg() * n_);
    alloc_res(f_.sz_res() * n_);
    alloc_w(f_.sz_w() * n_);
    alloc_iw(f_.sz_iw() * n_);
  }
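
A sketch of why every buffer is scaled by n_: at evaluation time each OpenMP thread needs a disjoint slice of f_'s work space. The layout below is illustrative, not the verbatim MapOmp::eval:

  // Sketch (assumed layout): thread i works on its own slice of the big buffers
  #pragma omp parallel for
  for (casadi_int i=0; i<n_; ++i) {
    const double** arg_i = arg + i*f_.sz_arg();  // offsets illustrative
    double** res_i = res + i*f_.sz_res();
    casadi_int* iw_i = iw + i*f_.sz_iw();
    double* w_i = w + i*f_.sz_w();
    // ... point arg_i/res_i at column i of the inputs/outputs, then evaluate f_
  }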
Example #5
  void QpToNlp::init(const Dict& opts) {
    // Initialize the base classes
    Qpsol::init(opts);

    // Default options
    string nlpsol_plugin;
    Dict nlpsol_options;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="nlpsol") {
        nlpsol_plugin = op.second.to_string();
      } else if (op.first=="nlpsol_options") {
        nlpsol_options = op.second;
      }
    }

    // Create a symbolic matrix for the decision variables
    SX X = SX::sym("X", n_, 1);

    // Parameters to the problem
    SX H = SX::sym("H", sparsity_in(QPSOL_H));
    SX G = SX::sym("G", sparsity_in(QPSOL_G));
    SX A = SX::sym("A", sparsity_in(QPSOL_A));

    // Put parameters in a vector
    std::vector<SX> par;
    par.push_back(H.nonzeros());
    par.push_back(G.nonzeros());
    par.push_back(A.nonzeros());

    // The NLP reads exactly like its mathematical description
    SXDict nlp = {{"x", X}, {"p", vertcat(par)},
                  {"f", mtimes(G.T(), X) + 0.5*mtimes(mtimes(X.T(), H), X)},
                  {"g", mtimes(A, X)}};

    // Create an Nlpsol instance
    casadi_assert_message(!nlpsol_plugin.empty(), "'nlpsol' option has not been set");
    solver_ = nlpsol("nlpsol", nlpsol_plugin, nlp, nlpsol_options);
    alloc(solver_);

    // Allocate storage for NLP solver parameters
    alloc_w(solver_.nnz_in(NLPSOL_P), true);
  }
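
A usage sketch, assuming the qpsol factory of the same CasADi vintage (the class derives from Qpsol) and an already-defined QP dict qp:

  // Sketch: delegate the QP to an NLP solver plugin
  Dict opts;
  opts["nlpsol"] = "ipopt";  // required: the assertion above rejects an empty plugin name
  opts["nlpsol_options"] = Dict{{"print_time", false}};
  Function solver = qpsol("solver", "nlpsol", qp, opts);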
Example #6
  void GurobiInterface::init(const Dict& opts) {
    // Initialize the base classes
    Conic::init(opts);

    // Default options
    std::vector<std::string> vtype;

    // Read options
    for (auto&& op : opts) {
      if (op.first=="vtype") {
        vtype = op.second;
      }
    }

    // Variable types
    if (!vtype.empty()) {
      casadi_assert_message(vtype.size()==nx_, "Option 'vtype' has wrong length");
      vtype_.resize(nx_);
      for (int i=0; i<nx_; ++i) {
        if (vtype[i]=="continuous") {
          vtype_[i] = GRB_CONTINUOUS;
        } else if (vtype[i]=="binary") {
          vtype_[i] = GRB_BINARY;
        } else if (vtype[i]=="integer") {
          vtype_[i] = GRB_INTEGER;
        } else if (vtype[i]=="semicont") {
          vtype_[i] = GRB_SEMICONT;
        } else if (vtype[i]=="semiint") {
          vtype_[i] = GRB_SEMIINT;
        } else {
          casadi_error("No such variable type: " + vtype[i]);
        }
      }
    }

    // Temporary memory
    alloc_w(nx_, true); // val
    alloc_iw(nx_, true); // ind
    alloc_iw(nx_, true); // ind2
    alloc_iw(nx_, true); // tr_ind
  }
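
A sketch of the vtype option as parsed above, assuming the conic factory matching this Conic-based interface and a three-variable problem qp:

  // Sketch: mixed-integer QP with variables 0 and 2 integer
  Dict opts;
  opts["vtype"] = std::vector<std::string>{"integer", "continuous", "integer"};
  Function solver = conic("solver", "gurobi", qp, opts);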
Example #7
  void SundialsInterface::init(const Dict& opts) {
    // Call the base class method
    Integrator::init(opts);

    // If sensitivity equations, make sure derivative_of_ is available
    casadi_assert_message(ns_==0 || !derivative_of_.is_null(),
      "Not implemented.");

    // Default options
    abstol_ = 1e-8;
    reltol_ = 1e-6;
    max_num_steps_ = 10000;
    stop_at_end_ = true;
    use_precon_ = true;
    max_krylov_ = 10;
    linear_solver_ = "csparse";
    string newton_scheme = "direct";
    quad_err_con_ = false;
    string interpolation_type = "hermite";
    steps_per_checkpoint_ = 20;
    disable_internal_warnings_ = false;
    max_multistep_order_ = 5;
    second_order_correction_ = true;
    step0_ = 0;
    max_order_ = 0;
    nonlin_conv_coeff_ = 0;

    // Read options
    for (auto&& op : opts) {
      if (op.first=="abstol") {
        abstol_ = op.second;
      } else if (op.first=="reltol") {
        reltol_ = op.second;
      } else if (op.first=="max_num_steps") {
        max_num_steps_ = op.second;
      } else if (op.first=="stop_at_end") {
        stop_at_end_ = op.second;
      } else if (op.first=="use_preconditioner") {
        use_precon_ = op.second;
      } else if (op.first=="max_krylov") {
        max_krylov_ = op.second;
      } else if (op.first=="newton_scheme") {
        newton_scheme = op.second.to_string();
      } else if (op.first=="linear_solver") {
        linear_solver_ = op.second.to_string();
      } else if (op.first=="linear_solver_options") {
        linear_solver_options_ = op.second;
      } else if (op.first=="quad_err_con") {
        quad_err_con_ = op.second;
      } else if (op.first=="interpolation_type") {
        interpolation_type = op.second.to_string();
      } else if (op.first=="steps_per_checkpoint") {
        steps_per_checkpoint_ = op.second;
      } else if (op.first=="disable_internal_warnings") {
        disable_internal_warnings_ = op.second;
      } else if (op.first=="max_multistep_order") {
        max_multistep_order_ = op.second;
      } else if (op.first=="second_order_correction") {
        second_order_correction_ = op.second;
      } else if (op.first=="step0") {
        step0_ = op.second;
      } else if (op.first=="max_order") {
        max_order_ = op.second;
      } else if (op.first=="nonlin_conv_coeff") {
        nonlin_conv_coeff_ = op.second;
      }
    }

    // Type of Newton scheme
    if (newton_scheme=="direct") {
      newton_scheme_ = SD_DIRECT;
    } else if (newton_scheme=="gmres") {
      newton_scheme_ = SD_GMRES;
    } else if (newton_scheme=="bcgstab") {
      newton_scheme_ = SD_BCGSTAB;
    } else if (newton_scheme=="tfqmr") {
      newton_scheme_ = SD_TFQMR;
    } else {
      casadi_error("Unknown Newton scheme: " + newton_scheme);
    }

    // Interpolation type
    if (interpolation_type=="hermite") {
      interp_ = SD_HERMITE;
    } else if (interpolation_type=="polynomial") {
      interp_ = SD_POLYNOMIAL;
    } else {
      casadi_error("Unknown interpolation type: " + interpolation_type);
    }

    // Get or create Jacobians and linear system solvers
    for (bool backward : {false, true}) {
      // Skip backward?
      if (backward && nrx_==0) continue;

      // Get Jacobian function
      Function J;
      if (ns_==0) {
        J = getJ(backward);
      } else {
        SundialsInterface* d = derivative_of_.get<SundialsInterface>();
        casadi_assert(d!=0);
        if (d->ns_==0) {
          J = d->get_function(backward ? "jacB" : "jacF");
        } else {
          J = d->getJ(backward);
        }
      }
      set_function(J, J.name(), true);
      alloc_w(J.nnz_out(0), true);
    }

    // Allocate work vectors
    alloc_w(np_, true); // p
    alloc_w(nrp_, true); // rp
    alloc_w(2*max(nx_+nz_, nrx_+nrz_), true); // v1, v2

    // Allocate linear solvers
    linsolF_ = Linsol("linsolF", linear_solver_, linear_solver_options_);
    if (nrx_>0) {
      linsolB_ = Linsol("linsolB", linear_solver_, linear_solver_options_);
    }
  }
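
A sketch of the options dispatched above, assuming CasADi's integrator factory with a Sundials plugin built on this class and a dae dict defined elsewhere:

  Dict opts;
  opts["newton_scheme"] = "gmres";    // direct | gmres | bcgstab | tfqmr
  opts["linear_solver"] = "csparse";  // plugin used for linsolF_/linsolB_
  opts["abstol"] = 1e-10;
  opts["reltol"] = 1e-8;
  Function I = integrator("I", "cvodes", dae, opts);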
Example #8
  void BonminInterface::init(const Dict& opts) {
    // Call the init method of the base class
    Nlpsol::init(opts);

    // Default options
    pass_nonlinear_variables_ = true;
    pass_nonlinear_constraints_ = true;
    Dict hess_lag_options, jac_g_options, grad_f_options;

    std::vector< std::vector<int> > sos1_groups;
    std::vector< std::vector<double> > sos1_weights;
    // Read user options
    for (auto&& op : opts) {
      if (op.first=="bonmin") {
        opts_ = op.second;
      } else if (op.first=="pass_nonlinear_variables") {
        pass_nonlinear_variables_ = op.second;
      } else if (op.first=="pass_nonlinear_constraints") {
        pass_nonlinear_constraints_ = op.second;
      } else if (op.first=="var_string_md") {
        var_string_md_ = op.second;
      } else if (op.first=="var_integer_md") {
        var_integer_md_ = op.second;
      } else if (op.first=="var_numeric_md") {
        var_numeric_md_ = op.second;
      } else if (op.first=="con_string_md") {
        con_string_md_ = op.second;
      } else if (op.first=="con_integer_md") {
        con_integer_md_ = op.second;
      } else if (op.first=="con_numeric_md") {
        con_numeric_md_ = op.second;
      } else if (op.first=="hess_lag_options") {
        hess_lag_options = op.second;
      } else if (op.first=="jac_g_options") {
        jac_g_options = op.second;
      } else if (op.first=="grad_f_options") {
        grad_f_options = op.second;
      } else if (op.first=="hess_lag") {
        Function f = op.second;
        casadi_assert_dev(f.n_in()==4);
        casadi_assert_dev(f.n_out()==1);
        set_function(f, "nlp_hess_l");
      } else if (op.first=="jac_g") {
        Function f = op.second;
        casadi_assert_dev(f.n_in()==2);
        casadi_assert_dev(f.n_out()==2);
        set_function(f, "nlp_jac_g");
      } else if (op.first=="grad_f") {
        Function f = op.second;
        casadi_assert_dev(f.n_in()==2);
        casadi_assert_dev(f.n_out()==2);
        set_function(f, "nlp_grad_f");
      } else if (op.first=="sos1_groups") {
        sos1_groups = to_int(op.second.to_int_vector_vector());
        for (auto & g : sos1_groups) {
          for (auto & e : g) e-= GlobalOptions::start_index;
        }
      } else if (op.first=="sos1_weights") {
        sos1_weights = op.second.to_double_vector_vector();
      } else if (op.first=="sos1_priorities") {
        sos1_priorities_ = to_int(op.second.to_int_vector());
      }
    }

    // Do we need second order derivatives?
    exact_hessian_ = true;
    auto hessian_approximation = opts_.find("hessian_approximation");
    if (hessian_approximation!=opts_.end()) {
      exact_hessian_ = hessian_approximation->second == "exact";
    }


    // Setup NLP functions
    create_function("nlp_f", {"x", "p"}, {"f"});
    create_function("nlp_g", {"x", "p"}, {"g"});
    if (!has_function("nlp_grad_f")) {
      create_function("nlp_grad_f", {"x", "p"}, {"f", "grad:f:x"});
    }
    if (!has_function("nlp_jac_g")) {
      create_function("nlp_jac_g", {"x", "p"}, {"g", "jac:g:x"});
    }
    jacg_sp_ = get_function("nlp_jac_g").sparsity_out(1);

    // By default, assume all nonlinear
    nl_ex_.resize(nx_, true);
    nl_g_.resize(ng_, true);

    // Allocate temporary work vectors
    if (exact_hessian_) {
      if (!has_function("nlp_hess_l")) {
        create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                        {"hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});
      }
      hesslag_sp_ = get_function("nlp_hess_l").sparsity_out(0);

      if (pass_nonlinear_variables_) {
        const casadi_int* col = hesslag_sp_.colind();
        for (casadi_int i=0;i<nx_;++i) nl_ex_[i] = col[i+1]-col[i];
      }
    } else {
      if (pass_nonlinear_variables_)
        nl_ex_ = oracle_.which_depends("x", {"f", "g"}, 2, false);
    }
    if (pass_nonlinear_constraints_)
      nl_g_ = oracle_.which_depends("x", {"g"}, 2, true);

    // Create sos info

    // Declare size
    sos_num_ = sos1_groups.size();
    // sos1 type
    sos1_types_.resize(sos_num_, 1);

    casadi_assert(sos1_weights.empty() || sos1_weights.size()==sos_num_,
      "sos1_weights has incorrect size");
    casadi_assert(sos1_priorities_.empty() || sos1_priorities_.size()==sos_num_,
      "sos1_priorities has incorrect size");
    if (sos1_priorities_.empty()) sos1_priorities_.resize(sos_num_, 1);

    sos_num_nz_ = 0;
    for (casadi_int i=0;i<sos_num_;++i) {
      // get local group
      const std::vector<int>& sos1_group = sos1_groups[i];

      // Get local weights
      std::vector<double> default_weights(sos1_group.size(), 1.0);
      const std::vector<double>& sos1_weight =
        sos1_weights.empty() ? default_weights : sos1_weights[i];
      casadi_assert(sos1_weight.size()==sos1_group.size(),
        "sos1_weights has incorrect size");

      // Populate lookup vector
      sos1_starts_.push_back(sos_num_nz_);
      sos_num_nz_+=sos1_group.size();

      sos1_weights_.insert(sos1_weights_.end(), sos1_weight.begin(), sos1_weight.end());
      sos1_indices_.insert(sos1_indices_.end(), sos1_group.begin(), sos1_group.end());
    }

    sos1_starts_.push_back(sos_num_nz_);

    // Allocate work vectors
    alloc_w(nx_, true); // xk_
    alloc_w(nx_, true); // lam_xk_
    alloc_w(ng_, true); // gk_
    alloc_w(nx_, true); // grad_fk_
    alloc_w(jacg_sp_.nnz(), true); // jac_gk_
    if (exact_hessian_) {
      alloc_w(hesslag_sp_.nnz(), true); // hess_lk_
    }
  }
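
A worked illustration of the SOS1 flattening loop above, with assumed values: two groups {0, 1} and {2, 3, 4} with unit weights produce CSR-style arrays:

  // sos1_groups   = {{0, 1}, {2, 3, 4}}   ->  sos_num_    == 2
  // sos1_indices_ = {0, 1, 2, 3, 4}       ->  sos_num_nz_ == 5
  // sos1_weights_ = {1, 1, 1, 1, 1}
  // sos1_starts_  = {0, 2, 5}             // group i spans [starts[i], starts[i+1])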
Example #9
  void WorhpInterface::init(const Dict& opts) {

    // Call the init method of the base class
    Nlpsol::init(opts);

    if (CheckWorhpVersion(WORHP_MAJOR, WORHP_MINOR, WORHP_PATCH)) {
      casadi_warning("Worhp incompatibility. Interface was compiled for Worhp " +
        str(WORHP_MAJOR) + "." + str(WORHP_MINOR) + "." + std::string(WORHP_PATCH));
    }

    // Default options
    Dict worhp_opts;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="worhp") {
        worhp_opts = op.second;
      }
    }

    // Sort Worhp options
    casadi_int nopts = WorhpGetParamCount();
    for (auto&& op : worhp_opts) {
      if (op.first.compare("qp")==0) {
        qp_opts_ = op.second;
        continue;
      }

      // Get corresponding index using a linear search
      casadi_int ind;
      for (ind=1; ind<=nopts; ++ind) {
        // Get name in WORHP
        const char* name = WorhpGetParamName(ind);
        // Break if matching name
        if (op.first.compare(name)==0) break;
      }
      if (ind>nopts) casadi_error("No such Worhp option: " + op.first);

      // Add to the corresponding list
      switch (WorhpGetParamType(ind)) {
      case WORHP_BOOL_T:
        bool_opts_[op.first] = op.second;
        break;
      case WORHP_DOUBLE_T:
        double_opts_[op.first] = op.second;
        break;
      case WORHP_INT_T:
        int_opts_[op.first] = op.second;
        break;
      default:
        casadi_error("Cannot handle WORHP option \"" + op.first + "\": Unknown type " +
          str(WorhpGetParamType(ind)) + ".");
        break;
      }
    }

    // Setup NLP functions
    f_fcn_ = create_function("nlp_f", {"x", "p"}, {"f"});
    g_fcn_ = create_function("nlp_g", {"x", "p"}, {"g"});
    grad_f_fcn_ = create_function("nlp_grad_f", {"x", "p"}, {"f", "grad:f:x"});
    jac_g_fcn_ = create_function("nlp_jac_g", {"x", "p"}, {"g", "jac:g:x"});
    jacg_sp_ = jac_g_fcn_.sparsity_out(1);
    hess_l_fcn_ = create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                                  {"transpose:hess:gamma:x:x"},
                                  {{"gamma", {"f", "g"}}});
    hesslag_sp_ = hess_l_fcn_.sparsity_out(0);

    // Temporary vectors
    alloc_w(nx_); // for fetching diagonal entries from the Hessian
  }
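
A usage sketch of the option sorting above (nlpsol factory and WORHP parameter names are assumptions; names are matched verbatim against WorhpGetParamName):

  Dict opts;
  opts["worhp"] = Dict{
    {"MaxIter", 200},   // integer parameter: lands in int_opts_
    {"TolOpti", 1e-8}   // double parameter: lands in double_opts_
  };
  Function solver = nlpsol("solver", "worhp", nlp, opts);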
Example #10
  void SnoptInterface::init(const Dict& opts) {
    // Call the init method of the base class
    Nlpsol::init(opts);

    // Default: cold start
    Cold_ = 0;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="snopt") {
        opts_ = op.second;
      } else if (op.first=="start") {
        std::string start = op.second.to_string();
        if (start=="cold") {
          Cold_ = 0;
        } else if (start=="warm") {
          Cold_ = 1;
        } else if (start=="hot") {
          Cold_ = 2;
        } else {
          casadi_error("Unknown start option: " + start);
        }
      }
    }

    // Get/generate required functions
    jac_f_fcn_ = create_function("nlp_jac_f", {"x", "p"}, {"f", "jac:f:x"});
    jac_g_fcn_ = create_function("nlp_jac_g", {"x", "p"}, {"g", "jac:g:x"});
    jacg_sp_ = jac_g_fcn_.sparsity_out(1);

    // prepare the mapping for constraints
    nnJac_ = nx_;
    nnObj_ = nx_;
    nnCon_ = ng_;

    // Here follows the core of the mapping
    //  Two integer matrices are constructed:
    //  one with gradF sparsity, and one with jacG sparsity
    //  the integer values denote the nonzero locations into the original gradF/jacG
    //  but with a special encoding: entries of gradF are encoded "-1-i" and
    //  entries of jacG are encoded "1+i"
    //  "0" is to be interpreted not as an index but as a literal zero

    IM mapping_jacG  = IM(0, nx_);
    IM mapping_gradF = IM(jac_f_fcn_.sparsity_out(1),
                          range(-1, -1-jac_f_fcn_.nnz_out(1), -1));

    if (!jac_g_fcn_.is_null()) {
      mapping_jacG = IM(jacg_sp_, range(1, jacg_sp_.nnz()+1));
    }

    // First, remap jacG
    A_structure_ = mapping_jacG;

    m_ = ng_;

    // Construct the linear objective row
    IM d = mapping_gradF(Slice(0), Slice());

    std::vector<int> ii = mapping_gradF.sparsity().get_col();
    for (int j = 0; j < nnObj_; ++j) {
      if (d.colind(j) != d.colind(j+1)) {
        int k = d.colind(j);
        d.nz(k) = 0;
      }
    }

    // Make it as sparse as you can
    d = sparsify(d);

    jacF_row_ = d.nnz() != 0;
    if (jacF_row_) {  // We need an objective gradient row
      A_structure_ = vertcat(A_structure_, d);
      m_ +=1;
    }
    iObj_ = jacF_row_ ? (m_ - 1) : -1;

    // Is the A matrix completely empty?
    dummyrow_ = A_structure_.nnz() == 0;  // Then we need a dummy row
    if (dummyrow_) {
      IM dummyrow = IM(1, nx_);
      dummyrow(0, 0) = 0;
      A_structure_ = vertcat(A_structure_, dummyrow);
      m_+=1;
    }

    // We don't need a dummy row if a linear objective row is present
    casadi_assert(!(dummyrow_ && jacF_row_));

    // Allocate temporary memory
    alloc_w(nx_, true); // xk2_
    alloc_w(ng_, true); // lam_gk_
    alloc_w(nx_, true); // lam_xk_
    alloc_w(ng_, true); // gk_
    alloc_w(jac_f_fcn_.nnz_out(1), true); // jac_fk_
    if (!jacg_sp_.is_null()) {
      alloc_w(jacg_sp_.nnz(), true); // jac_gk_
    }
  }
Example #11
  void SXFunctionInternal::init() {

    // Call the init function of the base class
    XFunctionInternal<SXFunction, SXFunctionInternal, SX, SXNode>::init();

    // Stack used to sort the computational graph
    stack<SXNode*> s;

    // All nodes
    vector<SXNode*> nodes;

    // Add the list of nodes
    int ind=0;
    for (vector<SX >::iterator it = outputv_.begin(); it != outputv_.end(); ++it, ++ind) {
      int nz=0;
      for (vector<SXElement>::iterator itc = it->begin(); itc != it->end(); ++itc, ++nz) {
        // Add outputs to the list
        s.push(itc->get());
        sort_depth_first(s, nodes);

        // A null pointer means an output instruction
        nodes.push_back(static_cast<SXNode*>(0));
      }
    }

    // Set the temporary variables to be the corresponding place in the sorted graph
    for (int i=0; i<nodes.size(); ++i) {
      if (nodes[i]) {
        nodes[i]->temp = i;
      }
    }

    // Sort the nodes by type
    constants_.clear();
    operations_.clear();
    for (vector<SXNode*>::iterator it = nodes.begin(); it != nodes.end(); ++it) {
      SXNode* t = *it;
      if (t) {
        if (t->isConstant())
          constants_.push_back(SXElement::create(t));
        else if (!t->isSymbolic())
          operations_.push_back(SXElement::create(t));
      }
    }

    // Use live variables?
    bool live_variables = getOption("live_variables");

    // Input instructions
    vector<pair<int, SXNode*> > symb_loc;

    // Current output and nonzero, start with the first one
    int curr_oind, curr_nz=0;
    for (curr_oind=0; curr_oind<outputv_.size(); ++curr_oind) {
      if (outputv_[curr_oind].nnz()!=0) {
        break;
      }
    }

    // Count the number of times each node is used
    vector<int> refcount(nodes.size(), 0);

    // Get the sequence of instructions for the virtual machine
    algorithm_.resize(0);
    algorithm_.reserve(nodes.size());
    for (vector<SXNode*>::iterator it=nodes.begin(); it!=nodes.end(); ++it) {
      // Current node
      SXNode* n = *it;

      // New element in the algorithm
      AlgEl ae;

      // Get operation
      ae.op = n==0 ? OP_OUTPUT : n->getOp();

      // Get instruction
      switch (ae.op) {
      case OP_CONST: // constant
        ae.d = n->getValue();
        ae.i0 = n->temp;
        break;
      case OP_PARAMETER: // a parameter or input
        symb_loc.push_back(make_pair(algorithm_.size(), n));
        ae.i0 = n->temp;
        break;
      case OP_OUTPUT: // output instruction
        ae.i0 = curr_oind;
        ae.i1 = outputv_[curr_oind].at(curr_nz)->temp;
        ae.i2 = curr_nz;

        // Go to the next nonzero
        curr_nz++;
        if (curr_nz>=outputv_[curr_oind].nnz()) {
          curr_nz=0;
          curr_oind++;
          for (; curr_oind<outputv_.size(); ++curr_oind) {
            if (outputv_[curr_oind].nnz()!=0) {
              break;
            }
          }
        }
        break;
      default:       // Unary or binary operation
        ae.i0 = n->temp;
        ae.i1 = n->dep(0).get()->temp;
        ae.i2 = n->dep(1).get()->temp;
      }

      // Number of dependencies
      int ndeps = casadi_math<double>::ndeps(ae.op);

      // Increase count of dependencies
      for (int c=0; c<ndeps; ++c) {
        refcount.at(c==0 ? ae.i1 : ae.i2)++;
      }
      // Add to algorithm
      algorithm_.push_back(ae);
    }

    // Place in the work vector for each of the nodes in the tree (overwrites the reference counter)
    vector<int> place(nodes.size());

    // Stack with unused elements in the work vector
    stack<int> unused;

    // Work vector size
    size_t worksize = 0;

    // Find a place in the work vector for the operation
    for (vector<AlgEl>::iterator it=algorithm_.begin(); it!=algorithm_.end(); ++it) {

      // Number of dependencies
      int ndeps = casadi_math<double>::ndeps(it->op);

      // decrease reference count of children
      // reverse order so that the first argument will end up at the top of the stack
      for (int c=ndeps-1; c>=0; --c) {
        int ch_ind = c==0 ? it->i1 : it->i2;
        int remaining = --refcount.at(ch_ind);
        if (remaining==0) unused.push(place[ch_ind]);
      }

      // Find a place to store the variable
      if (it->op!=OP_OUTPUT) {
        if (live_variables && !unused.empty()) {
          // Try to reuse a variable from the stack if possible (last in, first out)
          it->i0 = place[it->i0] = unused.top();
          unused.pop();
        } else {
          // Allocate a new variable
          it->i0 = place[it->i0] = worksize++;
        }
      }

      // Save the location of the children
      for (int c=0; c<ndeps; ++c) {
        if (c==0) {
          it->i1 = place[it->i1];
        } else {
          it->i2 = place[it->i2];
        }
      }

      // If binary, make sure that the second argument is the same as the first one
      // (in order to treat all operations as binary) NOTE: ugly
      if (ndeps==1 && it->op!=OP_OUTPUT) {
        it->i2 = it->i1;
      }
    }

    if (verbose()) {
      if (live_variables) {
        userOut() << "Using live variables: work array is "
             <<  worksize << " instead of " << nodes.size() << endl;
      } else {
        userOut() << "Live variables disabled." << endl;
      }
    }

    // Allocate work vectors (symbolic/numeric)
    alloc_w(worksize);
    alloc();
    s_work_.resize(worksize);

    // Reset the temporary variables
    for (int i=0; i<nodes.size(); ++i) {
      if (nodes[i]) {
        nodes[i]->temp = 0;
      }
    }

    // Now mark each input's place in the algorithm
    for (vector<pair<int, SXNode*> >::const_iterator it=symb_loc.begin();
         it!=symb_loc.end(); ++it) {
      it->second->temp = it->first+1;
    }

    // Add input instructions
    for (int ind=0; ind<inputv_.size(); ++ind) {
      int nz=0;
      for (vector<SXElement>::iterator itc = inputv_[ind].begin();
          itc != inputv_[ind].end();
          ++itc, ++nz) {
        int i = itc->getTemp()-1;
        if (i>=0) {
          // Mark as input
          algorithm_[i].op = OP_INPUT;

          // Location of the input
          algorithm_[i].i1 = ind;
          algorithm_[i].i2 = nz;

          // Mark input as read
          itc->setTemp(0);
        }
      }
    }

    // Locate free variables
    free_vars_.clear();
    for (vector<pair<int, SXNode*> >::const_iterator it=symb_loc.begin();
         it!=symb_loc.end(); ++it) {
      if (it->second->temp!=0) {
        // Save to list of free parameters
        free_vars_.push_back(SXElement::create(it->second));

        // Remove marker
        it->second->temp=0;
      }
    }

    // Initialize just-in-time compilation for numeric evaluation using OpenCL
    just_in_time_opencl_ = getOption("just_in_time_opencl");
    if (just_in_time_opencl_) {
#ifdef WITH_OPENCL
      freeOpenCL();
      allocOpenCL();
#else // WITH_OPENCL
      casadi_error("Option \"just_in_time_opencl\" true requires CasADi "
                   "to have been compiled with WITH_OPENCL=ON");
#endif // WITH_OPENCL
    }

    // Initialize just-in-time compilation for sparsity propagation using OpenCL
    just_in_time_sparsity_ = getOption("just_in_time_sparsity");
    if (just_in_time_sparsity_) {
#ifdef WITH_OPENCL
      spFreeOpenCL();
      spAllocOpenCL();
#else // WITH_OPENCL
      casadi_error("Option \"just_in_time_sparsity\" true requires CasADi to "
                   "have been compiled with WITH_OPENCL=ON");
#endif // WITH_OPENCL
    }

    if (CasadiOptions::profiling && CasadiOptions::profilingBinary) {

      profileWriteName(CasadiOptions::profilingLog, this, getOption("name"),
                       ProfilingData_FunctionType_SXFunction, algorithm_.size());
      int alg_counter = 0;

      // Iterator to free variables
      vector<SXElement>::const_iterator p_it = free_vars_.begin();

      std::stringstream stream;
      for (vector<AlgEl>::const_iterator it = algorithm_.begin(); it!=algorithm_.end(); ++it) {
        stream.str("");
        if (it->op==OP_OUTPUT) {
          stream << "output[" << it->i0 << "][" << it->i2 << "] = @" << it->i1;
        } else {
          stream << "@" << it->i0 << " = ";
          if (it->op==OP_INPUT) {
            stream << "input[" << it->i1 << "][" << it->i2 << "]";
          } else {
            if (it->op==OP_CONST) {
              stream << it->d;
            } else if (it->op==OP_PARAMETER) {
              stream << *p_it++;
            } else {
              int ndep = casadi_math<double>::ndeps(it->op);
              casadi_math<double>::printPre(it->op, stream);
              for (int c=0; c<ndep; ++c) {
                if (c==0) {
                  stream << "@" << it->i1;
                } else {
                  casadi_math<double>::printSep(it->op, stream);
                  stream << "@" << it->i2;
                }

              }
              casadi_math<double>::printPost(it->op, stream);
            }
          }
        }
        stream << std::endl;
        profileWriteSourceLine(CasadiOptions::profilingLog, this,
                               alg_counter++, stream.str(), it->op);
      }
    }

    // Print
    if (verbose()) {
      userOut() << "SXFunctionInternal::init Initialized " << getOption("name") << " ("
           << algorithm_.size() << " elementary operations)" << endl;
    }
  }
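
The live-variables pass above is a LIFO register allocation over the work vector; a standalone sketch of the slot-reuse rule (CasADi types stripped; a sketch under those assumptions):

  #include <stack>

  // Reuse a dead value's work slot before growing the work vector
  struct SlotAllocator {
    std::stack<int> unused;  // slots whose value is no longer referenced
    int worksize = 0;        // high-water mark == final work vector size

    int acquire() {          // slot for a new result
      if (!unused.empty()) { int s = unused.top(); unused.pop(); return s; }
      return worksize++;     // no dead slot free: extend the work vector
    }
    void release(int slot) { unused.push(slot); }  // a refcount hit zero
  };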
Example #12
  void SwitchInternal::init() {
    // Initialize the functions; get input and output sparsities
    std::vector<Sparsity> sp_in, sp_out;
    int num_in = -1, num_out=-1;
    for (int k=0; k<=f_.size(); ++k) {
      Function& fk = k<f_.size() ? f_[k] : f_def_;
      if (fk.isNull()) continue;
      fk.init(false);
      if (num_in<0) {
        // Number of inputs and outputs
        num_in=fk.nIn();
        num_out=fk.nOut();
        // Output sparsity
        sp_out.resize(num_out);
        for (int i=0; i<num_out; ++i) sp_out[i] = fk.output(i).sparsity();
        // Input sparsity
        sp_in.resize(num_in);
        for (int i=0; i<num_in; ++i) sp_in[i] = fk.input(i).sparsity();
      } else {
        // Assert matching number of inputs and outputs
        casadi_assert(num_in==fk.nIn());
        casadi_assert(num_out==fk.nOut());
        // Intersect with output sparsity
        for (int i=0; i<num_out; ++i) {
          sp_out[i] = sp_out[i].patternIntersection(fk.output(i).sparsity());
        }
        // Intersect with input sparsity
        for (int i=0; i<num_in; ++i) {
          sp_in[i] = sp_in[i].patternIntersection(fk.input(i).sparsity());
        }
      }
    }

    // Illegal to pass only "null" functions
    casadi_assert_message(num_in>=0, "All functions are null");

    // Allocate input and output buffers
    ibuf_.resize(1+num_in);
    input(0) = 0; // conditional
    for (int i=0; i<num_in; ++i) input(i+1) = DMatrix::zeros(sp_in[i]);
    obuf_.resize(num_out);
    for (int i=0; i<num_out; ++i) output(i) = DMatrix::zeros(sp_out[i]);

    // Call the initialization method of the base class
    FunctionInternal::init();

    // Get required work
    for (int k=0; k<=f_.size(); ++k) {
      const Function& fk = k<f_.size() ? f_[k] : f_def_;
      if (fk.isNull()) continue;

      // Get local work vector sizes
      alloc(fk);
      size_t sz_w = fk.sz_w();

      // Add size for input buffers
      for (int i=1; i<nIn(); ++i) {
        const Sparsity& s = fk.input(i-1).sparsity();
        if (s!=input(i).sparsity()) sz_w += s.nnz();
      }

      // Add size for output buffers
      for (int i=0; i<nOut(); ++i) {
        const Sparsity& s = fk.output(i).sparsity();
        if (s!=output(i).sparsity()) sz_w += s.nnz();
      }

      // Make sure enough work for this
      alloc_w(sz_w);
    }
  }
Example #13
  void SundialsInterface::init(const Dict& opts) {
    // Call the base class method
    Integrator::init(opts);

    // Default options
    abstol_ = 1e-8;
    reltol_ = 1e-6;
    exact_jacobian_ = true;
    max_num_steps_ = 10000;
    finite_difference_fsens_ = false;
    stop_at_end_ = true;
    use_preconditioner_ = false;
    max_krylov_ = 10;
    string linear_solver_type = "dense";
    string iterative_solver = "gmres";
    string pretype = "none";
    string linear_solver;
    Dict linear_solver_options;
    upper_bandwidth_ = -1;
    lower_bandwidth_ = -1;
    upper_bandwidthB_ = -1;
    lower_bandwidthB_ = -1;
    quad_err_con_ = false;
    interpolation_type_ = "hermite";
    steps_per_checkpoint_ = 20;
    disable_internal_warnings_ = false;
    max_multistep_order_ = 5;

    // Read options
    for (auto&& op : opts) {
      if (op.first=="abstol") {
        abstol_ = op.second;
      } else if (op.first=="reltol") {
        reltol_ = op.second;
      } else if (op.first=="exact_jacobian") {
        exact_jacobian_ = op.second;
      } else if (op.first=="max_num_steps") {
        max_num_steps_ = op.second;
      } else if (op.first=="finite_difference_fsens") {
        finite_difference_fsens_ = op.second;
      } else if (op.first=="stop_at_end") {
        stop_at_end_ = op.second;
      } else if (op.first=="use_preconditioner") {
        use_preconditioner_ = op.second;
      } else if (op.first=="max_krylov") {
        max_krylov_ = op.second;
      } else if (op.first=="linear_solver_type") {
        linear_solver_type = op.second.to_string();
      } else if (op.first=="iterative_solver") {
        iterative_solver = op.second.to_string();
      } else if (op.first=="pretype") {
        pretype = op.second.to_string();
      } else if (op.first=="linear_solver") {
        linear_solver = op.second.to_string();
      } else if (op.first=="linear_solver_options") {
        linear_solver_options = op.second;
      } else if (op.first=="upper_bandwidth") {
        upper_bandwidth_ = op.second;
      } else if (op.first=="lower_bandwidth") {
        lower_bandwidth_ = op.second;
      } else if (op.first=="upper_bandwidthB") {
        upper_bandwidthB_ = op.second;
      } else if (op.first=="lower_bandwidthB") {
        lower_bandwidthB_ = op.second;
      } else if (op.first=="quad_err_con") {
        quad_err_con_ = op.second;
      } else if (op.first=="interpolation_type") {
        interpolation_type_ = op.second.to_string();
      } else if (op.first=="steps_per_checkpoint") {
        steps_per_checkpoint_ = op.second;
      } else if (op.first=="disable_internal_warnings") {
        disable_internal_warnings_ = op.second;
      } else if (op.first=="max_multistep_order") {
        max_multistep_order_ = op.second;
      }
    }

    // Default dependent options
    exact_jacobianB_ = exact_jacobian_;
    fsens_abstol_ = abstol_;
    fsens_reltol_ = reltol_;
    abstolB_ = abstol_;
    reltolB_ = reltol_;
    use_preconditionerB_ = use_preconditioner_;
    max_krylovB_ = max_krylov_;
    std::string linear_solver_typeB = linear_solver_type;
    std::string iterative_solverB = iterative_solver;
    std::string pretypeB = pretype;
    string linear_solverB = linear_solver;
    Dict linear_solver_optionsB = linear_solver_options;

    // Read options again
    for (auto&& op : opts) {
      if (op.first=="exact_jacobianB") {
        exact_jacobianB_ = op.second;
      } else if (op.first=="fsens_abstol") {
        fsens_abstol_ = op.second;
      } else if (op.first=="fsens_reltol") {
        fsens_reltol_ = op.second;
      } else if (op.first=="abstolB") {
        abstolB_ = op.second;
      } else if (op.first=="reltolB") {
        reltolB_ = op.second;
      } else if (op.first=="use_preconditionerB") {
        use_preconditionerB_ = op.second;
      } else if (op.first=="max_krylovB") {
        max_krylovB_ = op.second;
      } else if (op.first=="linear_solver_typeB") {
        linear_solver_typeB = op.second.to_string();
      } else if (op.first=="iterative_solverB") {
        iterative_solverB = op.second.to_string();
      } else if (op.first=="pretypeB") {
        pretypeB = op.second.to_string();
      } else if (op.first=="linear_solverB") {
        linear_solverB = op.second.to_string();
      } else if (op.first=="linear_solver_optionsB") {
        linear_solver_optionsB = op.second;
      }
    }

    // No Jacobian of g if g doesn't exist
    if (g_.is_null()) {
      exact_jacobianB_ = false;
    }

    // Linear solver for forward integration
    if (linear_solver_type=="dense") {
      linsol_f_ = SD_DENSE;
    } else if (linear_solver_type=="banded") {
      linsol_f_ = SD_BANDED;
    } else if (linear_solver_type=="iterative") {
      linsol_f_ = SD_ITERATIVE;

      // Iterative solver
      if (iterative_solver=="gmres") {
        itsol_f_ = SD_GMRES;
      } else if (iterative_solver=="bcgstab") {
        itsol_f_ = SD_BCGSTAB;
      } else if (iterative_solver=="tfqmr") {
        itsol_f_ = SD_TFQMR;
      } else {
        casadi_error("Unknown iterative solver for forward integration: " + iterative_solver);
      }

      // Preconditioning type
      if (pretype=="none") {
        pretype_f_ = PREC_NONE;
      } else if (pretype=="left") {
        pretype_f_ = PREC_LEFT;
      } else if (pretype=="right") {
        pretype_f_ = PREC_RIGHT;
      } else if (pretype=="both") {
        pretype_f_ = PREC_BOTH;
      } else {
        casadi_error("Unknown preconditioning type for forward integration: " + pretype);
      }
    } else if (linear_solver_type=="user_defined") {
      linsol_f_ = SD_USER_DEFINED;
    } else {
      casadi_error("Unknown linear solver for forward integration: " + linear_solver_type);
    }

    // Linear solver for backward integration
    if (linear_solver_typeB=="dense") {
      linsol_g_ = SD_DENSE;
    } else if (linear_solver_typeB=="banded") {
      linsol_g_ = SD_BANDED;
    } else if (linear_solver_typeB=="iterative") {
      linsol_g_ = SD_ITERATIVE;

      // Iterative solver
      if (iterative_solverB=="gmres") {
        itsol_g_ = SD_GMRES;
      } else if (iterative_solverB=="bcgstab") {
        itsol_g_ = SD_BCGSTAB;
      } else if (iterative_solverB=="tfqmr") {
        itsol_g_ = SD_TFQMR;
      } else {
        casadi_error("Unknown sparse solver for backward integration: " + iterative_solverB);
      }

      // Preconditioning type
      if (pretypeB=="none") {
        pretype_g_ = PREC_NONE;
      } else if (pretypeB=="left") {
        pretype_g_ = PREC_LEFT;
      } else if (pretypeB=="right") {
        pretype_g_ = PREC_RIGHT;
      } else if (pretypeB=="both") {
        pretype_g_ = PREC_BOTH;
      } else {
        casadi_error("Unknown preconditioning type for backward integration: " + pretypeB);
      }
    } else if (linear_solver_typeB=="user_defined") {
      linsol_g_ = SD_USER_DEFINED;
    } else {
      casadi_error("Unknown linear solver for backward integration: " + iterative_solverB);
    }

    // Create a Jacobian if requested
    if (exact_jacobian_) {
      jac_ = getJac();
      alloc(jac_);
      alloc_w(jac_.nnz_out(0), true);
    }

    if (!jac_.is_null()) {
      casadi_assert_message(jac_.size2_out(0)==jac_.size1_out(0),
                            "SundialsInterface::init: the jacobian of the forward problem must "
                            "be square but got " << jac_.sparsity_out(0).dim());

      casadi_assert_message(!jac_.sparsity_out(0).is_singular(),
                            "SundialsInterface::init: singularity - the jacobian of the forward "
                            "problem is structurally rank-deficient. sprank(J)="
                            << sprank(jac_.sparsity_out(0)) << " (instead of " << jac_.size2_out(0)
                            << ")");
    }

    // Create a backwards Jacobian if requested
    if (exact_jacobianB_ && !g_.is_null()) jacB_ = getJacB();

    if (!jacB_.is_null()) {
      alloc(jacB_);
      alloc_w(jacB_.nnz_out(0), true);
      casadi_assert_message(jacB_.size2_out(0)==jacB_.size1_out(0),
                            "SundialsInterface::init: the jacobian of the backward problem must be "
                            "square but got " << jacB_.sparsity_out(0).dim());

      casadi_assert_message(!jacB_.sparsity_out(0).is_singular(),
                            "SundialsInterface::init: singularity - the jacobian of the backward"
                            " problem is structurally rank-deficient. sprank(J)="
                            << sprank(jacB_.sparsity_out(0)) << " (instead of "
                            << jacB_.size2_out(0) << ")");
    }

    // Create a linear solver
    if (!linear_solver.empty() && !jac_.is_null()) {
      linsol_ = linsol("linsol", linear_solver, jac_.sparsity_out(0),
                       1, linear_solver_options);
      alloc(linsol_);
    }

    // Create a linear solver
    if (!linear_solverB.empty() && !jacB_.is_null()) {
      linsolB_ = linsol("linsolB", linear_solverB, jacB_.sparsity_out(0),
                        1, linear_solver_optionsB);
      alloc(linsolB_);
    }

    // Allocate temporary memory
    //alloc_w(np_, true); // p_
    //alloc_w(nrp_, true); // rp_
  }
Example #14
  void BonminInterface::init(const Dict& opts) {
    // Call the init method of the base class
    Nlpsol::init(opts);

    // Default options
    pass_nonlinear_variables_ = false;
    Dict hess_lag_options, jac_g_options, grad_f_options;

    // Read user options
    for (auto&& op : opts) {
      if (op.first=="bonmin") {
        opts_ = op.second;
      } else if (op.first=="pass_nonlinear_variables") {
        pass_nonlinear_variables_ = op.second;
      } else if (op.first=="var_string_md") {
        var_string_md_ = op.second;
      } else if (op.first=="var_integer_md") {
        var_integer_md_ = op.second;
      } else if (op.first=="var_numeric_md") {
        var_numeric_md_ = op.second;
      } else if (op.first=="con_string_md") {
        con_string_md_ = op.second;
      } else if (op.first=="con_integer_md") {
        con_integer_md_ = op.second;
      } else if (op.first=="con_numeric_md") {
        con_numeric_md_ = op.second;
      } else if (op.first=="hess_lag_options") {
        hess_lag_options = op.second;
      } else if (op.first=="jac_g_options") {
        jac_g_options = op.second;
      } else if (op.first=="grad_f_options") {
        grad_f_options = op.second;
      } else if (op.first=="hess_lag") {
        Function f = op.second;
        casadi_assert(f.n_in()==4);
        casadi_assert(f.n_out()==1);
        set_function(f, "nlp_hess_l");
      } else if (op.first=="jac_g") {
        Function f = op.second;
        casadi_assert(f.n_in()==2);
        casadi_assert(f.n_out()==2);
        set_function(f, "nlp_jac_g");
      } else if (op.first=="grad_f") {
        Function f = op.second;
        casadi_assert(f.n_in()==2);
        casadi_assert(f.n_out()==2);
        set_function(f, "nlp_grad_f");
      }
    }

    // Do we need second order derivatives?
    exact_hessian_ = true;
    auto hessian_approximation = opts_.find("hessian_approximation");
    if (hessian_approximation!=opts_.end()) {
      exact_hessian_ = hessian_approximation->second == "exact";
    }

    // Setup NLP functions
    create_function("nlp_f", {"x", "p"}, {"f"});
    create_function("nlp_g", {"x", "p"}, {"g"});
    if (!has_function("nlp_grad_f")) {
      create_function("nlp_grad_f", {"x", "p"}, {"f", "grad:f:x"});
    }
    if (!has_function("nlp_jac_g")) {
      create_function("nlp_jac_g", {"x", "p"}, {"g", "jac:g:x"});
    }
    jacg_sp_ = get_function("nlp_jac_g").sparsity_out(1);

    // Allocate temporary work vectors
    if (exact_hessian_) {
      if (!has_function("nlp_hess_l")) {
        create_function("nlp_hess_l", {"x", "p", "lam:f", "lam:g"},
                        {"hess:gamma:x:x"}, {{"gamma", {"f", "g"}}});
      }
      hesslag_sp_ = get_function("nlp_hess_l").sparsity_out(0);
    } else if (pass_nonlinear_variables_) {
      nl_ex_ = oracle_.which_depends("x", {"f", "g"}, 2, false);
    }

    // Allocate work vectors
    alloc_w(nx_, true); // xk_
    alloc_w(ng_, true); // lam_gk_
    alloc_w(nx_, true); // lam_xk_
    alloc_w(ng_, true); // gk_
    alloc_w(nx_, true); // grad_fk_
    alloc_w(jacg_sp_.nnz(), true); // jac_gk_
    if (exact_hessian_) {
      alloc_w(hesslag_sp_.nnz(), true); // hess_lk_
    }
  }
Example #15
  void OoqpInterface::init(const Dict& opts) {
    // Initialize the base classes
    Conic::init(opts);

    // Default options
    print_level_ = 0;
    mutol_ = 1e-8;
    artol_ = 1e-8;

    // Read options
    for (auto&& op : opts) {
      if (op.first=="print_level") {
        print_level_ = op.second;
      } else if (op.first=="mutol") {
        mutol_ = op.second;
      } else if (op.first=="artol") {
        artol_ = op.second;
      }
    }

    // Allocate memory for problem
    nQ_ = H_.nnz_upper();
    nA_ = nnz_in(CONIC_A);
    nH_ = nnz_in(CONIC_H);
    spAT_ = A_.T();

    // Allocate work vectors
    alloc_w(nx_, true); // g
    alloc_w(nx_, true); // lbx
    alloc_w(nx_, true); // ubx
    alloc_w(na_, true); // lba
    alloc_w(na_, true); // uba
    alloc_w(nH_, true); // H
    alloc_w(nA_, true); // A
    alloc_w(nx_, true); // c_
    alloc_w(na_, true); // bA_
    alloc_w(nx_, true); // xlow_
    alloc_w(nx_, true); // xupp_
    alloc_w(na_, true); // clow_
    alloc_w(na_, true); // cupp_
    alloc_w(nx_, true); // x_
    alloc_w(nx_, true); // gamma_
    alloc_w(nx_, true); // phi_
    alloc_w(na_, true); // y_
    alloc_w(na_, true); // z_
    alloc_w(na_, true); // lambda_
    alloc_w(na_, true); // pi_
    alloc_iw(nx_, true); // ixlow_
    alloc_iw(nx_, true); // ixupp_
    alloc_iw(na_, true); // iclow_
    alloc_iw(na_, true); // icupp_
    alloc_w(nQ_, true); // dQ_
    alloc_w(nA_, true); // dA_
    alloc_w(nA_, true); // dC_
    alloc_iw(nQ_, true); // irowQ_
    alloc_iw(nQ_, true); // jcolQ_
    alloc_iw(nA_, true); // irowA_
    alloc_iw(nA_, true); // jcolA_
    alloc_iw(nA_, true); // irowC_
    alloc_iw(nA_, true); // jcolC_
    alloc_iw(nx_, true); // x_index_
    alloc_iw(na_, true); // c_index_
    alloc_w(nx_, true); // p_
    alloc_w(nA_, true); // AT
    alloc_iw(na_); // casadi_trans
  }