Example #1
SXFunction vec (const SXFunction &a) {
  // Pass null if input is null
  if (a.isNull()) return SXFunction();
  
  // Get the SX input and output vectors
  std::vector<SXMatrix> symbolicInputSX = a.inputsSX();
  std::vector<SXMatrix> symbolicOutputSX = a.outputsSX();
  
  // Apply vec to them
  for (int i=0;i<symbolicInputSX.size();++i)
    symbolicInputSX[i] = vec(symbolicInputSX[i]);
  for (int i=0;i<symbolicOutputSX.size();++i)
    symbolicOutputSX[i] = vec(symbolicOutputSX[i]);
    
  // Make a new function with the vecced input/outputs
  SXFunction ret(symbolicInputSX,symbolicOutputSX);
  
  // Initialize it if a was
  if (a.isInit()) ret.init();
  return ret;
}
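A minimal usage sketch (not from the original source), assuming the same CasADi 1.x-era API as the snippet above (SXMatrix, ssym, mul, trans): build a function with a matrix-valued input/output, then flatten it with the vec() helper defined above.

// Hypothetical usage of the vec() helper above; assumes the CasADi 1.x-era API.
SXMatrix x = ssym("x", 2, 2);          // 2-by-2 symbolic input
SXFunction f(x, mul(x, trans(x)));     // matrix-valued output
f.init();
SXFunction f_vec = vec(f);             // same mapping, inputs/outputs as column vectors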
Example #2
  void SymbolicQr::init() {
    // Call the base class initializer
    LinearSolverInternal::init();

    // Read options
    bool codegen = getOption("codegen");
    string compiler = getOption("compiler");

    // Make sure that command processor is available
    if (codegen) {
#ifdef WITH_DL
      int flag = system(static_cast<const char*>(0));
      casadi_assert_message(flag!=0, "No command processor available");
#else // WITH_DL
      casadi_error("Codegen requires CasADi to be compiled with option \"WITH_DL\" enabled");
#endif // WITH_DL
    }

    // Symbolic expression for A
    SX A = SX::sym("A", input(0).sparsity());

    // Get the inverted column permutation
    std::vector<int> inv_colperm(colperm_.size());
    for (int k=0; k<colperm_.size(); ++k)
      inv_colperm[colperm_[k]] = k;

    // Get the inverted row permutation
    std::vector<int> inv_rowperm(rowperm_.size());
    for (int k=0; k<rowperm_.size(); ++k)
      inv_rowperm[rowperm_[k]] = k;

    // Permute the linear system
    SX Aperm = A(rowperm_, colperm_);

    // Generate the QR factorization function
    vector<SX> QR(2);
    qr(Aperm, QR[0], QR[1]);
    SXFunction fact_fcn(A, QR);

    // Optionally generate c code and load as DLL
    if (codegen) {
      stringstream ss;
      ss << "symbolic_qr_fact_fcn_" << this;
      fact_fcn_ = dynamicCompilation(fact_fcn, ss.str(),
                                     "Symbolic QR factorization function", compiler);
    } else {
      fact_fcn_ = fact_fcn;
    }

    // Initialize factorization function
    fact_fcn_.setOption("name", "QR_fact");
    fact_fcn_.init();

    // Symbolic expressions for solve function
    SX Q = SX::sym("Q", QR[0].sparsity());
    SX R = SX::sym("R", QR[1].sparsity());
    SX b = SX::sym("b", input(1).size1(), 1);

    // Solve non-transposed
    // We have Pb' * Q * R * Px * x = b <=> x = Px' * inv(R) * Q' * Pb * b

    // Permute the right hand sides
    SX bperm = b(rowperm_, ALL);

    // Solve the factorized system
    SX xperm = casadi::solve(R, mul(Q.T(), bperm));

    // Permute back the solution
    SX x = xperm(inv_colperm, ALL);

    // Generate the QR solve function
    vector<SX> solv_in(3);
    solv_in[0] = Q;
    solv_in[1] = R;
    solv_in[2] = b;
    SXFunction solv_fcn(solv_in, x);

    // Optionally generate c code and load as DLL
    if (codegen) {
      stringstream ss;
      ss << "symbolic_qr_solv_fcn_N_" << this;
      solv_fcn_N_ = dynamicCompilation(solv_fcn, ss.str(), "QR_solv_N", compiler);
    } else {
      solv_fcn_N_ = solv_fcn;
    }

    // Initialize solve function
    solv_fcn_N_.setOption("name", "QR_solv");
    solv_fcn_N_.init();

    // Solve transposed
    // We have (Pb' * Q * R * Px)' * x = b
    // <=> Px' * R' * Q' * Pb * x = b
    // <=> x = Pb' * Q * inv(R') * Px * b

    // Permute the right hand side
    bperm = b(colperm_, ALL);

    // Solve the factorized system
    xperm = mul(Q, casadi::solve(R.T(), bperm));

    // Permute back the solution
    x = xperm(inv_rowperm, ALL);

    // Modify the QR solve function
    solv_fcn = SXFunction(solv_in, x);

    // Optionally generate c code and load as DLL
    if (codegen) {
      stringstream ss;
      ss << "symbolic_qr_solv_fcn_T_" << this;
      solv_fcn_T_ = dynamicCompilation(solv_fcn, ss.str(), "QR_solv_T", compiler);
    } else {
      solv_fcn_T_ = solv_fcn;
    }

    // Initialize solve function
    solv_fcn_T_.setOption("name", "QR_solv_T");
    solv_fcn_T_.init();

    // Allocate storage for QR factorization
    Q_ = DMatrix::zeros(Q.sparsity());
    R_ = DMatrix::zeros(R.sparsity());
  }
Example #3
  Function NlpSolver::joinFG(Function F, Function G) {
    if (G.isNull()) {
      // unconstrained
      if (is_a<SXFunction>(F)) {
        SXFunction F_sx = shared_cast<SXFunction>(F);
        vector<SX> nlp_in = F_sx.inputExpr();
        nlp_in.resize(NL_NUM_IN);
        vector<SX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_F] = F_sx.outputExpr(0);
        return SXFunction(nlp_in, nlp_out);
      } else if (is_a<MXFunction>(F)) {
        MXFunction F_mx = shared_cast<MXFunction>(F);
        vector<MX> nlp_in = F_mx.inputExpr();
        nlp_in.resize(NL_NUM_IN);
        vector<MX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_F] = F_mx.outputExpr(0);
        return MXFunction(nlp_in, nlp_out);
      } else {
        vector<MX> F_in = F.symbolicInput();
        vector<MX> nlp_in(NL_NUM_IN);
        nlp_in[NL_X] = F_in.at(0);
        if (F_in.size()>1) nlp_in[NL_P] = F_in.at(1);
        vector<MX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_F] = F(F_in).front();
        return MXFunction(nlp_in, nlp_out);
      }
    } else if (F.isNull()) {
      // feasibility problem
      if (is_a<SXFunction>(G)) {
        SXFunction G_sx = shared_cast<SXFunction>(G);
        vector<SX> nlp_in = G_sx.inputExpr();
        nlp_in.resize(NL_NUM_IN);
        vector<SX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_G] = G_sx.outputExpr(0);
        return SXFunction(nlp_in, nlp_out);
      } else if (is_a<MXFunction>(G)) {
        MXFunction G_mx = shared_cast<MXFunction>(G);
        vector<MX> nlp_in = G_mx.inputExpr();
        nlp_in.resize(NL_NUM_IN);
        vector<MX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_G] = G_mx.outputExpr(0);
        return MXFunction(nlp_in, nlp_out);
      } else {
        vector<MX> G_in = G.symbolicInput();
        vector<MX> nlp_in(NL_NUM_IN);
        nlp_in[NL_X] = G_in.at(0);
        if (G_in.size()>1) nlp_in[NL_P] = G_in.at(1);
        vector<MX> nlp_out(NL_NUM_OUT);
        nlp_out[NL_G] = G(G_in).at(0);
        return MXFunction(nlp_in, nlp_out);
      }
    } else {
      // Standard (constrained) NLP

      // SXFunction if both functions are SXFunction
      if (is_a<SXFunction>(F) && is_a<SXFunction>(G)) {
        vector<SX> nlp_in(NL_NUM_IN), nlp_out(NL_NUM_OUT);
        SXFunction F_sx = shared_cast<SXFunction>(F);
        SXFunction G_sx = shared_cast<SXFunction>(G);
        nlp_in[NL_X] = G_sx.inputExpr(0);
        if (G_sx.getNumInputs()>1) {
          nlp_in[NL_P] = G_sx.inputExpr(1);
        } else {
          nlp_in[NL_P] = SX::sym("p", 1, 0);
        }

        // Expression for f and g
        nlp_out[NL_G] = G_sx.outputExpr(0);
        nlp_out[NL_F] = substitute(F_sx.outputExpr(), F_sx.inputExpr(), G_sx.inputExpr()).front();

        return SXFunction(nlp_in, nlp_out);
      } else { // MXFunction otherwise
        vector<MX> nlp_in(NL_NUM_IN), nlp_out(NL_NUM_OUT);

        // Try to cast into MXFunction
        MXFunction F_mx = shared_cast<MXFunction>(F);
        MXFunction G_mx = shared_cast<MXFunction>(G);

        // Convert to MX if cast failed and make sure that they
        // use the same expressions if cast was successful
        if (!G_mx.isNull()) {
          nlp_in[NL_X] = G_mx.inputExpr(0);
          if (G_mx.getNumInputs()>1) {
            nlp_in[NL_P] = G_mx.inputExpr(1);
          } else {
            nlp_in[NL_P] = MX::sym("p", 1, 0);
          }
          nlp_out[NL_G] = G_mx.outputExpr(0);
          if (!F_mx.isNull()) { // Both are MXFunction, make sure they use the same variables
            nlp_out[NL_F] = substitute(F_mx.outputExpr(), F_mx.inputExpr(),
                                       G_mx.inputExpr()).front();
          } else { // G_ but not F_ MXFunction
            nlp_out[NL_F] = F(G_mx.inputExpr()).front();
          }
        } else {
          if (!F_mx.isNull()) { // F but not G MXFunction
            nlp_in[NL_X] = F_mx.inputExpr(0);
            if (F_mx.getNumInputs()>1) {
              nlp_in[NL_P] = F_mx.inputExpr(1);
            } else {
              nlp_in[NL_P] = MX::sym("p", 1, 0);
            }
            nlp_out[NL_F] = F_mx.outputExpr(0);
            nlp_out[NL_G] = G(F_mx.inputExpr()).front();
          } else { // None of them MXFunction
            vector<MX> FG_in = G.symbolicInput();
            nlp_in[NL_X] = FG_in.at(0);
            if (FG_in.size()>1) nlp_in[NL_P] = FG_in.at(1);
            nlp_out[NL_G] = G(FG_in).front();
            nlp_out[NL_F] = F(FG_in).front();
          }
        }
        return MXFunction(nlp_in, nlp_out);
      } // SXFunction/MXFunction
    } // constrained/unconstrained
  }
Example #4
  void Sqpmethod::init() {
    // Call the init method of the base class
    NlpSolverInternal::init();

    // Read options
    max_iter_ = getOption("max_iter");
    max_iter_ls_ = getOption("max_iter_ls");
    c1_ = getOption("c1");
    beta_ = getOption("beta");
    merit_memsize_ = getOption("merit_memory");
    lbfgs_memory_ = getOption("lbfgs_memory");
    tol_pr_ = getOption("tol_pr");
    tol_du_ = getOption("tol_du");
    regularize_ = getOption("regularize");
    exact_hessian_ = getOption("hessian_approximation")=="exact";
    min_step_size_ = getOption("min_step_size");

    // Get/generate required functions
    gradF();
    jacG();
    if (exact_hessian_) {
      hessLag();
    }

    // Allocate a QP solver
    Sparsity H_sparsity = exact_hessian_ ? hessLag().output().sparsity()
        : Sparsity::dense(nx_, nx_);
    H_sparsity = H_sparsity + Sparsity::diag(nx_);
    Sparsity A_sparsity = jacG().isNull() ? Sparsity(0, nx_)
        : jacG().output().sparsity();

    // QP solver options
    Dict qp_solver_options;
    if (hasSetOption("qp_solver_options")) {
      qp_solver_options = getOption("qp_solver_options");
    }

    // Allocate a QP solver
    qp_solver_ = QpSolver("qp_solver", getOption("qp_solver"),
                          make_map("h", H_sparsity, "a", A_sparsity),
                          qp_solver_options);

    // Lagrange multipliers of the NLP
    mu_.resize(ng_);
    mu_x_.resize(nx_);

    // Lagrange gradient in the next iterate
    gLag_.resize(nx_);
    gLag_old_.resize(nx_);

    // Current linearization point
    x_.resize(nx_);
    x_cand_.resize(nx_);
    x_old_.resize(nx_);

    // Constraint function value
    gk_.resize(ng_);
    gk_cand_.resize(ng_);

    // Hessian approximation
    Bk_ = DMatrix::zeros(H_sparsity);

    // Jacobian
    Jk_ = DMatrix::zeros(A_sparsity);

    // Bounds of the QP
    qp_LBA_.resize(ng_);
    qp_UBA_.resize(ng_);
    qp_LBX_.resize(nx_);
    qp_UBX_.resize(nx_);

    // QP solution
    dx_.resize(nx_);
    qp_DUAL_X_.resize(nx_);
    qp_DUAL_A_.resize(ng_);

    // Gradient of the objective
    gf_.resize(nx_);

    // Create Hessian update function
    if (!exact_hessian_) {
      // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
      SX Bk = SX::sym("Bk", H_sparsity);
      SX x = SX::sym("x", input(NLP_SOLVER_X0).sparsity());
      SX x_old = SX::sym("x", x.sparsity());
      SX gLag = SX::sym("gLag", x.sparsity());
      SX gLag_old = SX::sym("gLag_old", x.sparsity());

      SX sk = x - x_old;
      SX yk = gLag - gLag_old;
      SX qk = mul(Bk, sk);

      // Calculating theta
      SX skBksk = inner_prod(sk, qk);
      SX omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                               0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                               1);
      yk = omega * yk + (1 - omega) * qk;
      SX theta = 1. / inner_prod(sk, yk);
      SX phi = 1. / inner_prod(qk, sk);
      SX Bk_new = Bk + theta * mul(yk, yk.T()) - phi * mul(qk, qk.T());

      // Inputs of the BFGS update function
      vector<SX> bfgs_in(BFGS_NUM_IN);
      bfgs_in[BFGS_BK] = Bk;
      bfgs_in[BFGS_X] = x;
      bfgs_in[BFGS_X_OLD] = x_old;
      bfgs_in[BFGS_GLAG] = gLag;
      bfgs_in[BFGS_GLAG_OLD] = gLag_old;
      bfgs_ = SXFunction("bfgs", bfgs_in, make_vector(Bk_new));

      // Initial Hessian approximation
      B_init_ = DMatrix::eye(nx_);
    }

    // Header
    if (static_cast<bool>(getOption("print_header"))) {
      userOut()
        << "-------------------------------------------" << endl
        << "This is casadi::SQPMethod." << endl;
      if (exact_hessian_) {
        userOut() << "Using exact Hessian" << endl;
      } else {
        userOut() << "Using limited memory BFGS Hessian approximation" << endl;
      }
      userOut()
        << endl
        << "Number of variables:                       " << setw(9) << nx_ << endl
        << "Number of constraints:                     " << setw(9) << ng_ << endl
        << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.nnz() << endl
        << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.nnz() << endl
        << endl;
    }
  }
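For context, a minimal sketch of how this SQP method would typically be instantiated from user code through the plugin interface, assuming the CasADi 2.4-era API used above; the plugin names "sqpmethod" and "qpoases" are assumptions and depend on which plugins are compiled in.

// Hypothetical usage sketch; not part of the original source.
SX x = SX::sym("x", 2);
SXFunction nlp("nlp", nlpIn("x", x), nlpOut("f", inner_prod(x, x), "g", x));
Dict opts;
opts["qp_solver"] = "qpoases";                      // any available QP solver plugin
NlpSolver solver("solver", "sqpmethod", nlp, opts);
// Bounds (lbx, ubx, lbg, ubg) and the initial guess x0 would be set on
// 'solver' before evaluating it.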
Example #5
  Function implicitRK(Function& f, const std::string& impl, const Dictionary& impl_options,
                      const MX& tf, int order, const std::string& scheme, int ne) {
    casadi_assert_message(ne>=1, "Parameter ne (number of elements) must be at least 1, "
                          "but got " << ne << ".");
    casadi_assert_message(order==4, "Only RK order 4 is supported now.");
    casadi_assert_message(f.getNumInputs()==DAE_NUM_IN && f.getNumOutputs()==DAE_NUM_OUT,
                          "Supplied function must adhere to dae scheme.");
    casadi_assert_message(f.output(DAE_QUAD).isEmpty(),
                          "Supplied function cannot have quadrature states.");

    // Obtain collocation points
    std::vector<double> tau_root = collocationPoints(order, "legendre");

    // Retrieve collocation interpolating matrices
    std::vector < std::vector <double> > C;
    std::vector < double > D;
    collocationInterpolators(tau_root, C, D);

    // Retrieve problem dimensions
    int nx = f.input(DAE_X).size();
    int nz = f.input(DAE_Z).size();
    int np = f.input(DAE_P).size();

    //Variables for one finite element
    MX X = MX::sym("X", nx);
    MX P = MX::sym("P", np);
    MX V = MX::sym("V", order*(nx+nz)); // Unknowns

    MX X0 = X;

    // Components of the unknowns that correspond to states at collocation points
    std::vector<MX> Xc;
    Xc.reserve(order+1); // X0 plus one state per collocation point
    Xc.push_back(X0);

    // Components of the unknowns that correspond to algebraic states at collocation points
    std::vector<MX> Zc;
    Zc.reserve(order);

    // Splitting the unknowns
    std::vector<int> splitPositions = range(0, order*nx, nx);
    if (nz>0) {
      std::vector<int> Zc_pos = range(order*nx, order*nx+(order+1)*nz, nz);
      splitPositions.insert(splitPositions.end(), Zc_pos.begin(), Zc_pos.end());
    } else {
      splitPositions.push_back(order*nx);
    }
    std::vector<MX> Vs = vertsplit(V, splitPositions);

    // Extracting unknowns from V
    for (int i=0;i<order;++i) {
      Xc.push_back(X0+Vs[i]);
    }
    if (nz>0) {
      for (int i=0;i<order;++i) {
        Zc.push_back(Vs[order+i]);
      }
    }

    // Get the collocation Equations (that define V)
    std::vector<MX> V_eq;

    // Local start time
    MX t0_l=MX::sym("t0");
    MX h = MX::sym("h");

    for (int j=1;j<order+1;++j) {
      // Expression for the state derivative at the collocation point
      MX xp_j = 0;
      for (int r=0;r<order+1;++r) {
        xp_j+= C[j][r]*Xc[r];
      }
      // Append collocation equations & algebraic constraints
      std::vector<MX> f_out;
      MX t_l = t0_l+tau_root[j]*h;
      if (nz>0) {
        f_out = f.call(daeIn("t", t_l, "x", Xc[j], "p", P, "z", Zc[j-1]));
      } else {
        f_out = f.call(daeIn("t", t_l, "x", Xc[j], "p", P));
      }
      V_eq.push_back(h*f_out[DAE_ODE]-xp_j);
      V_eq.push_back(f_out[DAE_ALG]);

    }

    // Root-finding function, implicitly defines V as a function of X0 and P
    std::vector<MX> vfcn_inputs;
    vfcn_inputs.push_back(V);
    vfcn_inputs.push_back(X);
    vfcn_inputs.push_back(P);
    vfcn_inputs.push_back(t0_l);
    vfcn_inputs.push_back(h);

    Function vfcn = MXFunction(vfcn_inputs, vertcat(V_eq));
    vfcn.init();

    try {
      // Attempt to convert to SXFunction to decrease overhead
      vfcn = SXFunction(vfcn);
      vfcn.init();
    } catch(CasadiException & e) {
      //
    }

    // Create an implicit function instance to solve the system of equations
    ImplicitFunction ifcn(impl, vfcn, Function(), LinearSolver());
    ifcn.setOption(impl_options);
    ifcn.init();

    // Get an expression for the state at the end of the finite element
    std::vector<MX> ifcn_call_in(5);
    ifcn_call_in[0] = MX::zeros(V.sparsity());
    std::copy(vfcn_inputs.begin()+1, vfcn_inputs.end(), ifcn_call_in.begin()+1);
    std::vector<MX> ifcn_call_out = ifcn.call(ifcn_call_in, true);
    Vs = vertsplit(ifcn_call_out[0], splitPositions);

    MX XF = 0;
    for (int i=0;i<order+1;++i) {
      XF += D[i]*(i==0? X : X + Vs[i-1]);
    }


    // Get the discrete time dynamics
    ifcn_call_in.erase(ifcn_call_in.begin());
    MXFunction F = MXFunction(ifcn_call_in, XF);
    F.init();

    // Loop over all finite elements
    MX h_ = tf/ne;
    MX t0_ = 0;

    for (int i=0;i<ne;++i) {
      std::vector<MX> F_in;
      F_in.push_back(X);
      F_in.push_back(P);
      F_in.push_back(t0_);
      F_in.push_back(h_);
      t0_+= h_;
      std::vector<MX> F_out = F.call(F_in);
      X = F_out[0];
    }

    // Create a return function with integrator signature
    MXFunction ret = MXFunction(integratorIn("x0", X0, "p", P), integratorOut("xf", X));
    ret.init();

    return ret;

  }
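A small usage sketch (not from the original source), assuming the CasADi 2.x-era API used above and that an ImplicitFunction plugin such as "newton" or "kinsol" is available; the DAE and solver choices below are purely illustrative.

// Hypothetical usage sketch of implicitRK(); assumes the CasADi 2.x-era API.
SX x = SX::sym("x");
SX p = SX::sym("p");
Function f = SXFunction(daeIn("x", x, "p", p), daeOut("ode", -p*x));
f.init();

Dictionary impl_options;      // options forwarded to the implicit solver
MX tf = 1.0;                  // end time
Function irk = implicitRK(f, "newton", impl_options, tf, 4, "legendre", 10);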
Example #6
void SQPInternal::init(){
  // Call the init method of the base class
  NLPSolverInternal::init();
    
  // Read options
  maxiter_ = getOption("maxiter");
  maxiter_ls_ = getOption("maxiter_ls");
  c1_ = getOption("c1");
  beta_ = getOption("beta");
  merit_memsize_ = getOption("merit_memory");
  lbfgs_memory_ = getOption("lbfgs_memory");
  tol_pr_ = getOption("tol_pr");
  tol_du_ = getOption("tol_du");
  regularize_ = getOption("regularize");
  if(getOption("hessian_approximation")=="exact")
    hess_mode_ = HESS_EXACT;
  else if(getOption("hessian_approximation")=="limited-memory")
    hess_mode_ = HESS_BFGS;
   
  if (hess_mode_== HESS_EXACT && H_.isNull()) {
    if (!getOption("generate_hessian")){
      casadi_error("SQPInternal::evaluate: you set option 'hessian_approximation' to 'exact', but no hessian was supplied. Try with option \"generate_hessian\".");
    }
  }
  
  // If the Hessian is generated, we use exact approximation by default
  if (bool(getOption("generate_hessian"))){
    setOption("hessian_approximation", "exact");
  }
  
  // Allocate a QP solver
  CRSSparsity H_sparsity = hess_mode_==HESS_EXACT ? H_.output().sparsity() : sp_dense(n_,n_);
  H_sparsity = H_sparsity + DMatrix::eye(n_).sparsity();
  CRSSparsity A_sparsity = J_.isNull() ? CRSSparsity(0,n_,false) : J_.output().sparsity();

  QPSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(H_sparsity,A_sparsity);

  // Set options if provided
  if(hasSetOption("qp_solver_options")){
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }
  qp_solver_.init();
  
  // Lagrange multipliers of the NLP
  mu_.resize(m_);
  mu_x_.resize(n_);
  
  // Lagrange gradient in the next iterate
  gLag_.resize(n_);
  gLag_old_.resize(n_);

  // Current linearization point
  x_.resize(n_);
  x_cand_.resize(n_);
  x_old_.resize(n_);

  // Constraint function value
  gk_.resize(m_);
  gk_cand_.resize(m_);
  
  // Hessian approximation
  Bk_ = DMatrix(H_sparsity);
  
  // Jacobian
  Jk_ = DMatrix(A_sparsity);

  // Bounds of the QP
  qp_LBA_.resize(m_);
  qp_UBA_.resize(m_);
  qp_LBX_.resize(n_);
  qp_UBX_.resize(n_);

  // QP solution
  dx_.resize(n_);
  qp_DUAL_X_.resize(n_);
  qp_DUAL_A_.resize(m_);

  // Gradient of the objective
  gf_.resize(n_);

  // Create Hessian update function
  if(hess_mode_ == HESS_BFGS){
    // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
    SXMatrix Bk = ssym("Bk",H_sparsity);
    SXMatrix x = ssym("x",input(NLP_X_INIT).sparsity());
    SXMatrix x_old = ssym("x",x.sparsity());
    SXMatrix gLag = ssym("gLag",x.sparsity());
    SXMatrix gLag_old = ssym("gLag_old",x.sparsity());
    
    SXMatrix sk = x - x_old;
    SXMatrix yk = gLag - gLag_old;
    SXMatrix qk = mul(Bk, sk);
    
    // Calculating theta
    SXMatrix skBksk = inner_prod(sk, qk);
    SXMatrix omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                             0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                             1);
    yk = omega * yk + (1 - omega) * qk;
    SXMatrix theta = 1. / inner_prod(sk, yk);
    SXMatrix phi = 1. / inner_prod(qk, sk);
    SXMatrix Bk_new = Bk + theta * mul(yk, trans(yk)) - phi * mul(qk, trans(qk));
    
    // Inputs of the BFGS update function
    vector<SXMatrix> bfgs_in(BFGS_NUM_IN);
    bfgs_in[BFGS_BK] = Bk;
    bfgs_in[BFGS_X] = x;
    bfgs_in[BFGS_X_OLD] = x_old;
    bfgs_in[BFGS_GLAG] = gLag;
    bfgs_in[BFGS_GLAG_OLD] = gLag_old;
    bfgs_ = SXFunction(bfgs_in,Bk_new);
    bfgs_.setOption("number_of_fwd_dir",0);
    bfgs_.setOption("number_of_adj_dir",0);
    bfgs_.init();
    
    // Initial Hessian approximation
    B_init_ = DMatrix::eye(n_);
  }
  
  // Header
  if(bool(getOption("print_header"))){
    cout << "-------------------------------------------" << endl;
    cout << "This is CasADi::SQPMethod." << endl;
    switch (hess_mode_) {
      case HESS_EXACT:
        cout << "Using exact Hessian" << endl;
        break;
      case HESS_BFGS:
        cout << "Using limited memory BFGS Hessian approximation" << endl;
        break;
    }
    cout << endl;
    cout << "Number of variables:                       " << setw(9) << n_ << endl;
    cout << "Number of constraints:                     " << setw(9) << m_ << endl;
    cout << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.size() << endl;
    cout << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.size() << endl;
    cout << endl;
  }
}
Example #7
 Function SXFunctionInternal::getFullJacobian() {
   SX J = veccat(outputv_).zz_jacobian(veccat(inputv_));
   return SXFunction(name_ + "_jac", inputv_, make_vector(J));
 }
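A short sketch of how the full Jacobian would typically be requested from user code, assuming the CasADi 2.4-era API used above, where the public Function::fullJacobian() dispatches to this internal getFullJacobian(); names below are illustrative.

// Hypothetical usage sketch; not part of the original source.
SX x = SX::sym("x", 3);
SXFunction f("f", make_vector(x), make_vector(sin(x)));
Function J = f.fullJacobian();   // Jacobian of all outputs w.r.t. all inputs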
Example #8
  void SimulatorInternal::init() {
    // Let the integration time start from the first point of the time grid.
    if (!grid_.empty()) integrator_.setOption("t0", grid_[0]);
    // Let the integration time stop at the last point of the time grid.
    if (!grid_.empty()) integrator_.setOption("tf", grid_[grid_.size()-1]);

    casadi_assert_message(isNonDecreasing(grid_), "The supplied time grid must be non-decreasing.");

    // Initialize the integrator
    integrator_.init();

    // Generate an output function if there is none (returns the whole state)
    if (output_fcn_.isNull()) {
      SX t = SX::sym("t");
      SX x = SX::sym("x", integrator_.input(INTEGRATOR_X0).sparsity());
      SX z = SX::sym("z", integrator_.input(INTEGRATOR_Z0).sparsity());
      SX p = SX::sym("p", integrator_.input(INTEGRATOR_P).sparsity());

      vector<SX> arg(DAE_NUM_IN);
      arg[DAE_T] = t;
      arg[DAE_X] = x;
      arg[DAE_Z] = z;
      arg[DAE_P] = p;

      vector<SX> out(INTEGRATOR_NUM_OUT);
      out[INTEGRATOR_XF] = x;
      out[INTEGRATOR_ZF] = z;

      // Create the output function
      output_fcn_ = SXFunction(arg, out);

      output_.scheme = SCHEME_IntegratorOutput;
    }

    // Initialize the output function
    output_fcn_.init();

    // Allocate inputs
    setNumInputs(INTEGRATOR_NUM_IN);
    for (int i=0; i<INTEGRATOR_NUM_IN; ++i) {
      input(i) = integrator_.input(i);
    }

    // Allocate outputs
    setNumOutputs(output_fcn_->getNumOutputs());
    for (int i=0; i<getNumOutputs(); ++i) {
      output(i) = Matrix<double>::zeros(output_fcn_.output(i).numel(), grid_.size());
      if (!output_fcn_.output(i).isEmpty()) {
        casadi_assert_message(output_fcn_.output(i).isVector(),
                              "SimulatorInternal::init: Output function output #" << i
                              << " has shape " << output_fcn_.output(i).dimString()
                              << ", while a column-matrix shape is expected.");
      }
    }

    casadi_assert_message(output_fcn_.input(DAE_T).numel() <=1,
                          "SimulatorInternal::init: output_fcn DAE_T argument must be "
                          "scalar or empty, but got " << output_fcn_.input(DAE_T).dimString());

    casadi_assert_message(
        output_fcn_.input(DAE_P).isEmpty() ||
        integrator_.input(INTEGRATOR_P).sparsity() == output_fcn_.input(DAE_P).sparsity(),
        "SimulatorInternal::init: output_fcn DAE_P argument must be empty or"
        << " have dimension " << integrator_.input(INTEGRATOR_P).dimString()
        << ", but got " << output_fcn_.input(DAE_P).dimString());

    casadi_assert_message(
        output_fcn_.input(DAE_X).isEmpty() ||
        integrator_.input(INTEGRATOR_X0).sparsity() == output_fcn_.input(DAE_X).sparsity(),
        "SimulatorInternal::init: output_fcn DAE_X argument must be empty or have dimension "
        << integrator_.input(INTEGRATOR_X0).dimString()
        << ", but got " << output_fcn_.input(DAE_X).dimString());

    // Call base class method
    FunctionInternal::init();

    // Output iterators
    output_its_.resize(getNumOutputs());
  }
Example #9
void CollocationIntegratorInternal::init(){
  
  // Call the base class init
  IntegratorInternal::init();
  
  // Legendre collocation points
  double legendre_points[][6] = {
    {0},
    {0,0.500000},
    {0,0.211325,0.788675},
    {0,0.112702,0.500000,0.887298},
    {0,0.069432,0.330009,0.669991,0.930568},
    {0,0.046910,0.230765,0.500000,0.769235,0.953090}};
    
  // Radau collocation points
  double radau_points[][6] = {
    {0},
    {0,1.000000},
    {0,0.333333,1.000000},
    {0,0.155051,0.644949,1.000000},
    {0,0.088588,0.409467,0.787659,1.000000},
    {0,0.057104,0.276843,0.583590,0.860240,1.000000}};

  // Read options
  bool use_radau = false;
  if(getOption("collocation_scheme")=="radau"){
    use_radau = true;
  } else if(getOption("collocation_scheme")=="legendre"){
    use_radau = false;
  }
  
  // Hotstart?
  hotstart_ = getOption("hotstart");
  
  // Number of finite elements
  int nk = getOption("number_of_finite_elements");
  
  // Interpolation order
  int deg = getOption("interpolation_order");

  // Assume explicit ODE
  bool explicit_ode = f_.input(DAE_XDOT).size()==0;
  
  // All collocation time points
  double* tau_root = use_radau ? radau_points[deg] : legendre_points[deg];

  // Size of the finite elements
  double h = (tf_-t0_)/nk;
  
  // MX version of the same
  MX h_mx = h;
    
  // Coefficients of the collocation equation
  vector<vector<MX> > C(deg+1,vector<MX>(deg+1));

  // Coefficients of the continuity equation
  vector<MX> D(deg+1);

  // Collocation point
  SXMatrix tau = ssym("tau");

  // For all collocation points
  for(int j=0; j<deg+1; ++j){
    // Construct Lagrange polynomials to get the polynomial basis at the collocation point
    SXMatrix L = 1;
    for(int j2=0; j2<deg+1; ++j2){
      if(j2 != j){
        L *= (tau-tau_root[j2])/(tau_root[j]-tau_root[j2]);
      }
    }
    
    SXFunction lfcn(tau,L);
    lfcn.init();
  
    // Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    lfcn.setInput(1.0);
    lfcn.evaluate();
    D[j] = lfcn.output();

    // Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the collocation equation
    for(int j2=0; j2<deg+1; ++j2){
      lfcn.setInput(tau_root[j2]);
      lfcn.setFwdSeed(1.0);
      lfcn.evaluate(1,0);
      C[j][j2] = lfcn.fwdSens();
    }
  }
  
  // Initial state
  MX X0("X0",nx_);
  
  // Parameters
  MX P("P",np_);
  
  // Backward state
  MX RX0("RX0",nrx_);
  
  // Backward parameters
  MX RP("RP",nrp_);
  
  // Collocated differential states and algebraic variables
  int nX = (nk*(deg+1)+1)*(nx_+nrx_);
  int nZ = nk*deg*(nz_+nrz_);
  
  // Unknowns
  MX V("V",nX+nZ);
  int offset = 0;
  
  // Get collocated states, algebraic variables and times
  vector<vector<MX> > X(nk+1);
  vector<vector<MX> > RX(nk+1);
  vector<vector<MX> > Z(nk);
  vector<vector<MX> > RZ(nk);
  coll_time_.resize(nk+1);
  for(int k=0; k<nk+1; ++k){
    // Number of time points
    int nj = k==nk ? 1 : deg+1;
    
    // Allocate differential states expressions at the time points
    X[k].resize(nj);
    RX[k].resize(nj);
    coll_time_[k].resize(nj);

    // Allocate algebraic variable expressions at the collocation points
    if(k!=nk){
      Z[k].resize(nj-1);
      RZ[k].resize(nj-1);
    }

    // For all time points
    for(int j=0; j<nj; ++j){
      // Get expressions for the differential state
      X[k][j] = V[range(offset,offset+nx_)];
      offset += nx_;
      RX[k][j] = V[range(offset,offset+nrx_)];
      offset += nrx_;
      
      // Get the local time
      coll_time_[k][j] = h*(k + tau_root[j]);
      
      // Get expressions for the algebraic variables
      if(j>0){
        Z[k][j-1] = V[range(offset,offset+nz_)];
        offset += nz_;
        RZ[k][j-1] = V[range(offset,offset+nrz_)];
        offset += nrz_;
      }
    }
  }
  
  // Check offset for consistency
  casadi_assert(offset==V.size());

  // Constraints
  vector<MX> g;
  g.reserve(2*(nk+1));
  
  // Quadrature expressions
  MX QF = MX::zeros(nq_);
  MX RQF = MX::zeros(nrq_);
  
  // Counter
  int jk = 0;
  
  // Add initial condition
  g.push_back(X[0][0]-X0);
  
  // For all finite elements
  for(int k=0; k<nk; ++k, ++jk){
  
    // For all collocation points
    for(int j=1; j<deg+1; ++j, ++jk){
      // Get the time
      MX tkj = coll_time_[k][j];
      
      // Get an expression for the state derivative at the collocation point
      MX xp_jk = 0;
      for(int j2=0; j2<deg+1; ++j2){
        xp_jk += C[j2][j]*X[k][j2];
      }
      
      // Add collocation equations to the NLP
      vector<MX> f_in(DAE_NUM_IN);
      f_in[DAE_T] = tkj;
      f_in[DAE_P] = P;
      f_in[DAE_X] = X[k][j];
      f_in[DAE_Z] = Z[k][j-1];
      
      vector<MX> f_out;
      if(explicit_ode){
        // Assume equation of the form ydot = f(t,y,p)
        f_out = f_.call(f_in);
        g.push_back(h_mx*f_out[DAE_ODE] - xp_jk);
      } else {
        // Assume equation of the form 0 = f(t,y,ydot,p)
        f_in[DAE_XDOT] = xp_jk/h_mx;
        f_out = f_.call(f_in);
        g.push_back(f_out[DAE_ODE]);
      }
      
      // Add the algebraic conditions
      if(nz_>0){
        g.push_back(f_out[DAE_ALG]);
      }
      
      // Add the quadrature
      if(nq_>0){
        QF += D[j]*h_mx*f_out[DAE_QUAD];
      }
      
      // Now for the backward problem
      if(nrx_>0){
        
        // Get an expression for the state derivative at the collocation point
        MX rxp_jk = 0;
        for(int j2=0; j2<deg+1; ++j2){
          rxp_jk += C[j2][j]*RX[k][j2];
        }
        
        // Add collocation equations to the NLP
        vector<MX> g_in(RDAE_NUM_IN);
        g_in[RDAE_T] = tkj;
        g_in[RDAE_X] = X[k][j];
        g_in[RDAE_Z] = Z[k][j-1];
        g_in[RDAE_P] = P;
        g_in[RDAE_RP] = RP;
        g_in[RDAE_RX] = RX[k][j];
        g_in[RDAE_RZ] = RZ[k][j-1];
        
        vector<MX> g_out;
        if(explicit_ode){
          // Assume equation of the form xdot = f(t,x,p)
          g_out = g_.call(g_in);
          g.push_back(h_mx*g_out[RDAE_ODE] - rxp_jk);
        } else {
          // Assume equation of the form 0 = f(t,x,xdot,p)
          g_in[RDAE_XDOT] = xp_jk/h_mx;
          g_in[RDAE_RXDOT] = rxp_jk/h_mx;
          g_out = g_.call(g_in);
          g.push_back(g_out[RDAE_ODE]);
        }
        
        // Add the algebraic conditions
        if(nrz_>0){
          g.push_back(g_out[RDAE_ALG]);
        }
        
        // Add the backward quadrature
        if(nrq_>0){
          RQF += D[j]*h_mx*g_out[RDAE_QUAD];
        }
      }
    }
    
    // Get an expression for the state at the end of the finite element
    MX xf_k = 0;
    for(int j=0; j<deg+1; ++j){
      xf_k += D[j]*X[k][j];
    }

    // Add continuity equation to NLP
    g.push_back(X[k+1][0] - xf_k);
    
    if(nrx_>0){
      // Get an expression for the state at the end of the finite element
      MX rxf_k = 0;
      for(int j=0; j<deg+1; ++j){
        rxf_k += D[j]*RX[k][j];
      }

      // Add continuity equation to NLP
      g.push_back(RX[k+1][0] - rxf_k);
    }
  }
  
  // Add initial condition for the backward integration
  if(nrx_>0){
    g.push_back(RX[nk][0]-RX0);
  }
  
  // Constraint expression
  MX gv = vertcat(g);
    
  // Make sure that the dimension is consistent with the number of unknowns
  casadi_assert_message(gv.size()==V.size(),"Implicit function unknowns and equations do not match");

  // Nonlinear constraint function input
  vector<MX> gfcn_in(1+INTEGRATOR_NUM_IN);
  gfcn_in[0] = V;
  gfcn_in[1+INTEGRATOR_X0] = X0;
  gfcn_in[1+INTEGRATOR_P] = P;
  gfcn_in[1+INTEGRATOR_RX0] = RX0;
  gfcn_in[1+INTEGRATOR_RP] = RP;

  vector<MX> gfcn_out(1+INTEGRATOR_NUM_OUT);
  gfcn_out[0] = gv;
  gfcn_out[1+INTEGRATOR_XF] = X[nk][0];
  gfcn_out[1+INTEGRATOR_QF] = QF;
  gfcn_out[1+INTEGRATOR_RXF] = RX[0][0];
  gfcn_out[1+INTEGRATOR_RQF] = RQF;
  
  // Nonlinear constraint function
  FX gfcn = MXFunction(gfcn_in,gfcn_out);
  
  // Expand f?
  bool expand_f = getOption("expand_f");
  if(expand_f){
    gfcn.init();
    gfcn = SXFunction(shared_cast<MXFunction>(gfcn));
  }
  
  // Get the NLP creator function
  implicitFunctionCreator implicit_function_creator = getOption("implicit_solver");
  
  // Allocate an NLP solver
  implicit_solver_ = implicit_function_creator(gfcn);
  
  // Pass options
  if(hasSetOption("implicit_solver_options")){
    const Dictionary& implicit_solver_options = getOption("implicit_solver_options");
    implicit_solver_.setOption(implicit_solver_options);
  }
  
  // Initialize the solver
  implicit_solver_.init();
  
  if(hasSetOption("startup_integrator")){
    
    // Create the linear solver
    integratorCreator startup_integrator_creator = getOption("startup_integrator");
    
    // Allocate an NLP solver
    startup_integrator_ = startup_integrator_creator(f_,g_);
    
    // Pass options
    startup_integrator_.setOption("number_of_fwd_dir",0); // not needed
    startup_integrator_.setOption("number_of_adj_dir",0); // not needed
    startup_integrator_.setOption("t0",coll_time_.front().front());
    startup_integrator_.setOption("tf",coll_time_.back().back());
    if(hasSetOption("startup_integrator_options")){
      const Dictionary& startup_integrator_options = getOption("startup_integrator_options");
      startup_integrator_.setOption(startup_integrator_options);
    }
    
    // Initialize the startup integrator
    startup_integrator_.init();
  }

  // Mark the system not yet integrated
  integrated_once_ = false;
}
Example #10
void RKIntegratorInternal::init(){
  // Call the base class init
  IntegratorInternal::init();
  casadi_assert_message(nq_==0, "Quadratures not supported.");
  
  // Number of finite elements
  int nk = getOption("number_of_finite_elements");
  
  // Interpolation order
  int deg = getOption("interpolation_order");
  casadi_assert_message(deg==1, "Not implemented");

  // Expand f?
  bool expand_f = getOption("expand_f");

  // Size of the finite elements
  double h = (tf_-t0_)/nk;
  
  // MX version of the same
  MX h_mx = h;
    
  // Initial state
  MX Y0("Y0",nx_);
  
  // Free parameters
  MX P("P",np_);

  // Current state
  MX Y = Y0;
  
  // Dummy time
  MX T = 0;
  
  // Integrate until the end
  for(int k=0; k<nk; ++k){
    
    // Call the ode right hand side function
    vector<MX> f_in(DAE_NUM_IN);
    f_in[DAE_T] = T;
    f_in[DAE_X] = Y;
    f_in[DAE_P] = P;
    vector<MX> f_out = f_.call(f_in);
    MX ode_rhs = f_out[DAE_ODE];
    
    // Explicit Euler step
    Y += h_mx*ode_rhs;
  }
  
  // Create a function which returns the state at the end of the time horizon
  vector<MX> yf_in(2);
  yf_in[0] = Y0;
  yf_in[1] = P;
  MXFunction yf_fun(yf_in,Y);
  
  // Should the function be expanded in elementary operations?
  if(expand_f){
    yf_fun.init();
    yf_fun_ = SXFunction(yf_fun);
  } else {
    yf_fun_ = yf_fun;
  }
  
  // Set number of derivative directions
  yf_fun_.setOption("number_of_fwd_dir",getOption("number_of_fwd_dir"));
  yf_fun_.setOption("number_of_adj_dir",getOption("number_of_adj_dir"));
  
  // Initialize function
  yf_fun_.init();
}
Example #11
void LiftedSQPInternal::init(){
  // Call the init method of the base class
  NlpSolverInternal::init();

  // Number of lifted variables
  nv = getOption("num_lifted");
  if(verbose_){
    cout << "Initializing SQP method with " << nx_ << " variables and " << ng_ << " constraints." << endl;
    cout << "Lifting " << nv << " variables." << endl;
    if(gauss_newton_){
      cout << "Gauss-Newton objective with " << F_.input().numel() << " terms." << endl;
    }
  }
  
  // Read options
  max_iter_ = getOption("max_iter");
  max_iter_ls_ = getOption("max_iter_ls");
  toldx_ = getOption("toldx");
  tolgl_ = getOption("tolgl");
  sigma_ = getOption("sigma");
  rho_ = getOption("rho");
  mu_safety_ = getOption("mu_safety");
  eta_ = getOption("eta");
  tau_ = getOption("tau");
    
  // Assume SXFunction for now
  SXFunction ffcn = shared_cast<SXFunction>(F_);
  casadi_assert(!ffcn.isNull());
  SXFunction gfcn = shared_cast<SXFunction>(G_);
  casadi_assert(!gfcn.isNull());
  
  // Extract the free variables and split into independent and dependent variables
  SX x = ffcn.inputExpr(0);
  int nx = x.size();
  nu = nx-nv;
  SX u = x[Slice(0,nu)];
  SX v = x[Slice(nu,nu+nv)];

  // Extract the constraint equations and split into constraints and definitions of dependent variables
  SX f1 = ffcn.outputExpr(0);
  int nf1 = f1.numel();
  SX g = gfcn.outputExpr(0);
  int nf2 = g.numel()-nv;
  SX v_eq = g(Slice(0,nv));
  SX f2 = g(Slice(nv,nv+nf2));
  
  // Definition of v
  SX v_def = v_eq + v;

  // Objective function
  SX f;
  
  // Multipliers
  SX lam_x, lam_g, lam_f2;
  if(gauss_newton_){
    
    // Least square objective
    f = inner_prod(f1,f1)/2;
    
  } else {
    
    // Scalar objective function
    f = f1;
    
    // Lagrange multipliers for the simple bounds on u
    SX lam_u = ssym("lam_u",nu);
    
    // Lagrange multipliers for the simple bounds on v
    SX lam_v = ssym("lam_v",nv);
    
    // Lagrange multipliers for the simple bounds on x
    lam_x = vertcat(lam_u,lam_v);

    // Lagrange multipliers corresponding to the definition of the dependent variables
    SX lam_v_eq = ssym("lam_v_eq",nv);

    // Lagrange multipliers for the nonlinear constraints that aren't eliminated
    lam_f2 = ssym("lam_f2",nf2);

    if(verbose_){
      cout << "Allocated intermediate variables." << endl;
    }
    
    // Lagrange multipliers for constraints
    lam_g = vertcat(lam_v_eq,lam_f2);
    
    // Lagrangian function
    SX lag = f + inner_prod(lam_x,x);
    if(!f2.empty()) lag += inner_prod(lam_f2,f2);
    if(!v.empty()) lag += inner_prod(lam_v_eq,v_def);
    
    // Gradient of the Lagrangian
    SX lgrad = casadi::gradient(lag,x);
    if(!v.empty()) lgrad -= vertcat(SX::zeros(nu),lam_v_eq); // Put here to ensure that lgrad is of the form "h_extended -v_extended"
    makeDense(lgrad);
    if(verbose_){
      cout << "Generated the gradient of the Lagrangian." << endl;
    }

    // Condensed gradient of the Lagrangian
    f1 = lgrad[Slice(0,nu)];
    nf1 = nu;
    
    // Gradient of h
    SX v_eq_grad = lgrad[Slice(nu,nu+nv)];
    
    // Reverse lam_v_eq and v_eq_grad
    SX v_eq_grad_reversed = v_eq_grad;
    copy(v_eq_grad.rbegin(),v_eq_grad.rend(),v_eq_grad_reversed.begin());
    SX lam_v_eq_reversed = lam_v_eq;
    copy(lam_v_eq.rbegin(),lam_v_eq.rend(),lam_v_eq_reversed.begin());
    
    // Augment h and lam_v_eq
    v_eq.append(v_eq_grad_reversed);
    v.append(lam_v_eq_reversed);
  }

  // Residual function G
  SXVector G_in(G_NUM_IN);
  G_in[G_X] = x;
  G_in[G_LAM_X] = lam_x;
  G_in[G_LAM_G] = lam_g;

  SXVector G_out(G_NUM_OUT);
  G_out[G_D] = v_eq;
  G_out[G_G] = g;
  G_out[G_F] = f;

  rfcn_ = SXFunction(G_in,G_out);
  rfcn_.setOption("number_of_fwd_dir",0);
  rfcn_.setOption("number_of_adj_dir",0);
  rfcn_.setOption("live_variables",true);
  rfcn_.init();
  if(verbose_){
    cout << "Generated residual function ( " << shared_cast<SXFunction>(rfcn_).getAlgorithmSize() << " nodes)." << endl;
  }
  
  // Difference vector d
  SX d = ssym("d",nv);
  if(!gauss_newton_){
    vector<SX> dg = ssym("dg",nv).data();
    reverse(dg.begin(),dg.end());
    d.append(dg);
  }

  // Substitute out the v from the h
  SX d_def = (v_eq + v)-d;
  SXVector ex(3);
  ex[0] = f1;
  ex[1] = f2;
  ex[2] = f;
  substituteInPlace(v, d_def, ex, false);
  SX f1_z = ex[0];
  SX f2_z = ex[1];
  SX f_z = ex[2];
  
  // Modified function Z
  enum ZIn{Z_U,Z_D,Z_LAM_X,Z_LAM_F2,Z_NUM_IN};
  SXVector zfcn_in(Z_NUM_IN);
  zfcn_in[Z_U] = u;
  zfcn_in[Z_D] = d;
  zfcn_in[Z_LAM_X] = lam_x;
  zfcn_in[Z_LAM_F2] = lam_f2;
  
  enum ZOut{Z_D_DEF,Z_F12,Z_NUM_OUT};
  SXVector zfcn_out(Z_NUM_OUT);
  zfcn_out[Z_D_DEF] = d_def;
  zfcn_out[Z_F12] = vertcat(f1_z,f2_z);
  
  SXFunction zfcn(zfcn_in,zfcn_out);
  zfcn.init();
  if(verbose_){
    cout << "Generated reconstruction function ( " << zfcn.getAlgorithmSize() << " nodes)." << endl;
  }

  // Matrix A and B in lifted Newton
  SX B = zfcn.jac(Z_U,Z_F12);
  SX B1 = B(Slice(0,nf1),Slice(0,B.size2()));
  SX B2 = B(Slice(nf1,B.size1()),Slice(0,B.size2()));
  if(verbose_){
    cout << "Formed B1 (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros) " <<
    "and B2 (dimension " << B2.size1() << "-by-" << B2.size2() << ", "<< B2.size() << " nonzeros)." << endl;
  }
  
  // Step in u
  SX du = ssym("du",nu);
  SX dlam_f2 = ssym("dlam_f2",lam_f2.sparsity());
  
  SX b1 = f1_z;
  SX b2 = f2_z;
  SX e;
  if(nv > 0){
    
    // Directional derivative of Z
    vector<vector<SX> > Z_fwdSeed(2,zfcn_in);
    vector<vector<SX> > Z_fwdSens(2,zfcn_out);
    vector<vector<SX> > Z_adjSeed;
    vector<vector<SX> > Z_adjSens;
    
    Z_fwdSeed[0][Z_U].setZero();
    Z_fwdSeed[0][Z_D] = -d;
    Z_fwdSeed[0][Z_LAM_X].setZero();
    Z_fwdSeed[0][Z_LAM_F2].setZero();
    
    Z_fwdSeed[1][Z_U] = du;
    Z_fwdSeed[1][Z_D] = -d;
    Z_fwdSeed[1][Z_LAM_X].setZero();
    Z_fwdSeed[1][Z_LAM_F2] = dlam_f2;
    
    zfcn.eval(zfcn_in,zfcn_out,Z_fwdSeed,Z_fwdSens,Z_adjSeed,Z_adjSens);
    
    b1 += Z_fwdSens[0][Z_F12](Slice(0,nf1));
    b2 += Z_fwdSens[0][Z_F12](Slice(nf1,B.size1()));
    e = Z_fwdSens[1][Z_D_DEF];
  }
  if(verbose_){
    cout << "Formed b1 (dimension " << b1.size1() << "-by-" << b1.size2() << ", "<< b1.size() << " nonzeros) " <<
    "and b2 (dimension " << b2.size1() << "-by-" << b2.size2() << ", "<< b2.size() << " nonzeros)." << endl;
  }
  
  // Generate Gauss-Newton Hessian
  if(gauss_newton_){
    b1 = mul(trans(B1),b1);
    B1 = mul(trans(B1),B1);
    if(verbose_){
      cout << "Gauss Newton Hessian (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros)." << endl;
    }
  }
  
  // Make sure b1 and b2 are dense vectors
  makeDense(b1);
  makeDense(b2);
  
  // Quadratic approximation
  SXVector lfcn_in(LIN_NUM_IN);
  lfcn_in[LIN_X] = x;
  lfcn_in[LIN_D] = d;
  lfcn_in[LIN_LAM_X] = lam_x;
  lfcn_in[LIN_LAM_G] = lam_g;
  
  SXVector lfcn_out(LIN_NUM_OUT);
  lfcn_out[LIN_F1] = b1;
  lfcn_out[LIN_J1] = B1;
  lfcn_out[LIN_F2] = b2;
  lfcn_out[LIN_J2] = B2;
  lfcn_ = SXFunction(lfcn_in,lfcn_out);
//   lfcn_.setOption("verbose",true);
  lfcn_.setOption("number_of_fwd_dir",0);
  lfcn_.setOption("number_of_adj_dir",0);
  lfcn_.setOption("live_variables",true);
  lfcn_.init();
  if(verbose_){
    cout << "Generated linearization function ( " << shared_cast<SXFunction>(lfcn_).getAlgorithmSize() << " nodes)." << endl;
  }
    
  // Step expansion
  SXVector efcn_in(EXP_NUM_IN);
  copy(lfcn_in.begin(),lfcn_in.end(),efcn_in.begin());
  efcn_in[EXP_DU] = du;
  efcn_in[EXP_DLAM_F2] = dlam_f2;
  efcn_ = SXFunction(efcn_in,e);
  efcn_.setOption("number_of_fwd_dir",0);
  efcn_.setOption("number_of_adj_dir",0);
  efcn_.setOption("live_variables",true);
  efcn_.init();
  if(verbose_){
    cout << "Generated step expansion function ( " << shared_cast<SXFunction>(efcn_).getAlgorithmSize() << " nodes)." << endl;
  }
  
  // Current guess for the primal solution
  DMatrix &x_k = output(NLP_SOLVER_X);
  
  // Current guess for the dual solution
  DMatrix &lam_x_k = output(NLP_SOLVER_LAM_X);
  DMatrix &lam_g_k = output(NLP_SOLVER_LAM_G);

  // Allocate a QP solver
  QpSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(B1.sparsity(),B2.sparsity());
  
  // Set options if provided
  if(hasSetOption("qp_solver_options")){
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }
  
  // Initialize the QP solver
  qp_solver_.init();
  if(verbose_){
    cout << "Allocated QP solver." << endl;
  }

  // Residual
  d_k_ = DMatrix(d.sparsity(),0);
  
  // Primal step
  dx_k_ = DMatrix(x_k.sparsity());

  // Dual step
  dlam_x_k_ = DMatrix(lam_x_k.sparsity());
  dlam_g_k_ = DMatrix(lam_g_k.sparsity());
  
}
Example #12
void SdpSolverInternal::init() {
  // Call the init method of the base class
  FunctionInternal::init();

  calc_p_ = getOption("calc_p");
  calc_dual_ = getOption("calc_dual");
  print_problem_ = getOption("print_problem");

  // Find aggregate sparsity pattern
  Sparsity aggregate = input(SDP_SOLVER_G).sparsity();
  for (int i=0;i<n_;++i) {
    aggregate = aggregate + input(SDP_SOLVER_F)(ALL, Slice(i*m_, (i+1)*m_)).sparsity();
  }

  // Detect block diagonal structure in this sparsity pattern
  std::vector<int> p;
  std::vector<int> r;
  nb_ = aggregate.stronglyConnectedComponents(p, r);
  block_boundaries_.resize(nb_+1);
  std::copy(r.begin(), r.begin()+nb_+1, block_boundaries_.begin());

  block_sizes_.resize(nb_);
  for (int i=0;i<nb_;++i) {
    block_sizes_[i]=r[i+1]-r[i];
  }

  // Make a mapping function from dense blocks to inversely-permuted block diagonal P
  std::vector< SX > full_blocks;
  for (int i=0;i<nb_;++i) {
    full_blocks.push_back(SX::sym("block", block_sizes_[i], block_sizes_[i]));
  }

  Pmapper_ = SXFunction(full_blocks, blkdiag(full_blocks)(lookupvector(p, p.size()),
                                                         lookupvector(p, p.size())));
  Pmapper_.init();

  if (nb_>0) {
    // Make a mapping function from (G, F) -> (G[p, p]_j, F_i[p, p]j)
    SX G = SX::sym("G", input(SDP_SOLVER_G).sparsity());
    SX F = SX::sym("F", input(SDP_SOLVER_F).sparsity());

    std::vector<SX> in;
    in.push_back(G);
    in.push_back(F);
    std::vector<SX> out((n_+1)*nb_);
    for (int j=0;j<nb_;++j) {
      out[j] = G(p, p)(Slice(r[j], r[j+1]), Slice(r[j], r[j+1]));
    }
    for (int i=0;i<n_;++i) {
      SX Fi = F(ALL, Slice(i*m_, (i+1)*m_))(p, p);
      for (int j=0;j<nb_;++j) {
        out[(i+1)*nb_+j] = Fi(Slice(r[j], r[j+1]), Slice(r[j], r[j+1]));
      }
    }
    mapping_ = SXFunction(in, out);
    mapping_.init();
  }

  // Output arguments
  setNumOutputs(SDP_SOLVER_NUM_OUT);
  output(SDP_SOLVER_X) = DMatrix::zeros(n_, 1);
  output(SDP_SOLVER_P) = calc_p_? DMatrix(Pmapper_.output().sparsity(), 0) : DMatrix();
  output(SDP_SOLVER_DUAL) = calc_dual_? DMatrix(Pmapper_.output().sparsity(), 0) : DMatrix();
  output(SDP_SOLVER_COST) = 0.0;
  output(SDP_SOLVER_DUAL_COST) = 0.0;
  output(SDP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
  output(SDP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);

}