Example #1
  void SqicInterface::init() {
    // Call the init method of the base class
    Conic::init();

    if (is_init_) sqicDestroy();

    inf_ = 1.0e+20;

    // Allocate data structures for SQIC
    bl_.resize(n_+nc_+1, 0);
    bu_.resize(n_+nc_+1, 0);
    x_.resize(n_+nc_+1, 0);
    hs_.resize(n_+nc_+1, 0);
    hEtype_.resize(n_+nc_+1, 0);
    pi_.resize(nc_+1, 0);
    rc_.resize(n_+nc_+1, 0);

    locH_ = st_[QP_STRUCT_H].colind();
    indH_ = st_[QP_STRUCT_H].row();

    // Fortran indices are one-based
    for (int i=0;i<indH_.size();++i) indH_[i]+=1;
    for (int i=0;i<locH_.size();++i) locH_[i]+=1;

    // Sparsity of augmented linear constraint matrix
    Sparsity A_ = vertcat(st_[QP_STRUCT_A], Sparsity::dense(1, n_));
    locA_ = A_.colind();
    indA_ = A_.row();

    // Fortran indices are one-based
    for (int i=0;i<indA_.size();++i) indA_[i]+=1;
    for (int i=0;i<locA_.size();++i) locA_[i]+=1;

    // Helper function for the augmented linear constraint matrix
    MX a = MX::sym("A", st_[QP_STRUCT_A]);
    MX g = MX::sym("g", n_);
    std::vector<MX> ins;
    ins.push_back(a);
    ins.push_back(g);
    formatA_ = Function("formatA", ins, {vertcat(a, g.T())});

    // Set objective row of augmented linear constraints
    bu_[n_+nc_] = inf_;
    bl_[n_+nc_] = -inf_;

    is_init_ = true;

    int n = n_;
    int m = nc_+1;

    int nnzA = formatA_.size_out(0);
    int nnzH = input(CONIC_H).size();

    std::fill(hEtype_.begin()+n_, hEtype_.end(), 3);

    sqic(&m , &n, &nnzA, &indA_[0], &locA_[0], &formatA_.output().nonzeros()[0], &bl_[0], &bu_[0],
         &hEtype_[0], &hs_[0], &x_[0], &pi_[0], &rc_[0], &nnzH, &indH_[0], &locH_[0],
         &input(CONIC_H).nonzeros()[0]);

  }
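
Note: SQIC is a Fortran QP code, hence the one-based shift applied to all sparsity indices above. The extra row that formatA_ appends to the constraint matrix carries the linear objective term g; its bounds at position n_+nc_ are set to -inf/+inf so that the objective row never acts as a constraint.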
Example #2
 BinaryMX<ScX, ScY>::BinaryMX(Operation op, const MX& x, const MX& y) : op_(op) {
   setDependencies(x, y);
   if (ScX) {
     setSparsity(y.sparsity());
   } else {
     setSparsity(x.sparsity());
   }
 }
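
Note: ScX and ScY are compile-time flags recording whether the first or the second operand is scalar. When the first operand is scalar (ScX), the result takes the sparsity of y; otherwise it takes the sparsity of x. This matches the usual broadcasting rule for scalar-matrix binary operations.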
Example #3
 Assertion::Assertion(const MX& x, const MX& y, const std::string & fail_message)
     : fail_message_(fail_message) {
   casadi_assert_message(y.is_scalar(),
                         "Assertion: the assertion expression y must be scalar, but got "
                         << y.dim());
   setDependencies(x, y);
   setSparsity(x.sparsity());
 }
Example #4
 MX SymbolicMX::join_primitives(std::vector<MX>::const_iterator& it) const {
   MX ret = *it++;
   if (ret.size()==size()) {
     return ret;
   } else {
     casadi_assert(ret.is_empty(true));
     return MX(size());
   }
 }
Example #5
  UnaryMX::UnaryMX(Operation op, MX x) : op_(op) {
    // Put a densifying node in between if necessary
    if (!operation_checker<F00Checker>(op_)) {
      x.densify();
    }

    setDependencies(x);
    setSparsity(x->sparsity());
  }
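
Note: operation_checker<F00Checker>(op_) tests whether the operation maps zero to zero. If it does not (e.g. exp, since exp(0)=1), the structural zeros of a sparse input would evaluate to nonzero values, so the input is densified first.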
Example #6
Multiplication::Multiplication(const MX& x, const MX& y_trans){
  casadi_assert_message(x.size2() == y_trans.size2(),
    "Multiplication::Multiplication: dimension mismatch. Attempting to multiply "
    << x.dimString() << " with " << y_trans.dimString());
  setDependencies(x, y_trans);

  // Create the sparsity pattern for the matrix-matrix product
  CRSSparsity spres = x->sparsity().patternProduct(y_trans.sparsity());

  // Save sparsity
  setSparsity(spres);
}
Example #7
  void CondensingIndefDpleInternal::init() {
    // Initialize the base classes
    DpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    n_ = A_[0].size1();


    MX As = MX::sym("A", horzcat(A_));
    MX Vs = MX::sym("V", horzcat(V_));

    std::vector< MX > Vss = horzsplit(Vs, n_);
    std::vector< MX > Ass = horzsplit(As, n_);

    for (int k=0;k<K_;++k) {
      Vss[k] = (Vss[k]+Vss[k].T())/2;
    }

    MX R = MX::zeros(n_, n_);

    for (int k=0;k<K_;++k) {
      R = mul(mul(Ass[k], R), Ass[k].T()) + Vss[k];
    }

    std::vector< MX > Assr(K_);
    std::reverse_copy(Ass.begin(), Ass.end(), Assr.begin());

    MX Ap = mul(Assr);

    // Create a DleSolver instance
    solver_ = DleSolver(getOption(solvername()), dleStruct("a", Ap.sparsity(), "v", R.sparsity()));
    solver_.setOption(getOption(optionsname()));

    // Initialize the DLE solver
    solver_.init();

    std::vector<MX> Pr = solver_.call(dpleIn("a", Ap, "v", R));

    std::vector<MX> Ps(K_);
    Ps[0] = Pr[0];

    for (int k=0;k<K_-1;++k) {
      Ps[k+1] = mul(mul(Ass[k], Ps[k]), Ass[k].T()) + Vss[k];
    }

    f_ = MXFunction(dpleIn("a", As, "v", Vs), dpleOut("p", horzcat(Ps)));
    f_.init();

    Wrapper::checkDimensions();

  }
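
Note: this condensing solver reduces the K-periodic DPLE to a single DLE. R accumulates the noise contribution over one full period, Ap = A_{K-1}*...*A_0 is the monodromy matrix, the inner DleSolver computes the first periodic solution from (Ap, R), and the remaining Ps are recovered by forward propagation through the period.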
Example #8
 HorzRepsum::HorzRepsum(const MX& x, int n) : n_(n) {
   casadi_assert(x.size2() % n == 0);
   std::vector<Sparsity> sp = horzsplit(x.sparsity(), x.size2()/n);
   Sparsity block = sp[0];
   for (int i=1;i<sp.size();++i) {
     block = block+sp[i];
   }
   Sparsity goal = repmat(block, 1, n);
   setDependencies(project(x, goal));
   setSparsity(block);
 }
Example #9
 /// Convert scalar to matrix
 inline static MX toMatrix(const MX& x, const Sparsity& sp) {
   if (x.size()==sp.size()) {
     return x;
   } else {
     return MX(sp, x);
   }
 }
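
A minimal usage sketch (hypothetical, not from the original source; it assumes CasADi's MX::sym and Sparsity::dense together with the toMatrix helper above):

 // Broadcast a scalar over a 2x2 dense pattern via the MX(sp, x) constructor
 MX s = MX::sym("s");                  // scalar symbolic primitive
 Sparsity sp = Sparsity::dense(2, 2);  // target pattern
 MX m = toMatrix(s, sp);               // sizes differ: returns MX(sp, s)
 MX same = toMatrix(m, sp);            // sizes match: returned unchanged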
Example #10
MX UnaryMX::create(int op, const MX& x){
  /*if(x.isConstant()){
    // Constant folding
    const DMatrix& x_val = x.getConstant();
    DMatrix y_val; // Dummy argument
    DMatrix r_val;
    casadi_math<DMatrix>::fun(op,x_val,y_val,r_val);
    return r_val;
  } else*/ if(operation_checker<F0XChecker>(op) && isZero(x)){
    // If identically zero
    return MX::sparse(x.size1(),x.size2());
  } else {
    // Create a new node
    return MX::create(new UnaryMX(Operation(op),x));
  }
}
Example #11
  Diagsplit::Diagsplit(const MX& x,
    const std::vector<int>& offset1,
    const std::vector<int>& offset2) : Split(x, offset1) {

    // Split up the sparsity pattern
    output_sparsity_ = diagsplit(x.sparsity(), offset1, offset2);

    // Have offset_ refer to the nonzero offsets instead of column offsets
    offset_.resize(1);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      offset_.push_back(offset_.back() + it->nnz());
    }

    casadi_assert_message(offset_.back()==x.nnz(),
      "DiagSplit: the presence of nonzeros outside the diagonal blocks is unsupported.");
  }
Example #12
  Vertsplit::Vertsplit(const MX& x, const std::vector<int>& offset) : Split(x, offset) {

    // Split up the sparsity pattern
    output_sparsity_ = vertsplit(x.sparsity(), offset_);

    // Have offset_ refer to the nonzero offsets instead of the row offsets
    offset_.resize(1);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      offset_.push_back(offset_.back() + it->nnz());
    }
  }
Example #13
 Multiplication<TrX,TrY>::Multiplication(const MX& z, const MX& x, const MX& y){
   casadi_assert_message(TrX || !TrY, "Illegal combination");
   casadi_assert_message(TrX, "Not implemented");
   casadi_assert_message(!TrY,"Not implemented");
   casadi_assert_message(x.size1() == y.size1() && x.size2() == z.size1() && y.size2() == z.size2(),
     "Multiplication::Multiplication: dimension mismatch. Attempting to multiply trans("
     << x.dimString() << ") with " << y.dimString()
     << " and add the result to " << z.dimString());
   setDependencies(z,x,y);
   setSparsity(z.sparsity());
 }
Example #14
  void SimpleIndefDleInternal::init() {

    DleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");

    n_ = A_.size1();

    MX As = MX::sym("A", A_);
    MX Vs = MX::sym("V", V_);

    MX Vss = (Vs+Vs.T())/2;

    MX A_total = DMatrix::eye(n_*n_) - kron(As, As);

    MX Pf = solve(A_total, vec(Vss), getOption("linear_solver"));

    MX P = reshape(Pf, n_, n_);

    f_ = MXFunction(dleIn("a", As, "v", Vs),
      dleOut("p", MX(P(output().sparsity()))));

    f_.init();

    casadi_assert(getNumOutputs()==f_.getNumOutputs());
    for (int i=0;i<getNumInputs();++i) {
      casadi_assert_message(input(i).sparsity()==f_.input(i).sparsity(),
        "Sparsity mismatch for input " << i << ":" <<
        input(i).dimString() << " <-> " << f_.input(i).dimString() << ".");
    }
    for (int i=0;i<getNumOutputs();++i) {
      casadi_assert_message(output(i).sparsity()==f_.output(i).sparsity(),
        "Sparsity mismatch for output " << i << ":" <<
        output(i).dimString() << " <-> " << f_.output(i).dimString() << ".");
    }
  }
Example #15
  void FixedSmithLrDleInternal::init() {
    iter_  = getOption("iter");

    LrDleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");

    MX H = MX::sym("H", H_);
    MX A = MX::sym("A", A_);
    MX C = MX::sym("C", C_);
    MX V = MX::sym("V", V_);

    MX Vs = (V+V.T())/2;

    MX D = with_C_ ? C : DMatrix::eye(A_.size1());


    std::vector<MX> HPH(Hs_.size(), 0);
    std::vector<MX> Hs = with_H_? horzsplit(H, Hi_) : std::vector<MX>();
    MX out = 0;

    for (int i=0;i<iter_;++i) {
      if (with_H_) {
        for (int k=0;k<Hs.size();++k) {
          MX v = mul(D.T(), Hs[k]);
          HPH[k]+= mul(v.T(), mul(Vs, v));
        }
      } else {
        out += mul(D, mul(Vs, D.T()));
      }
      D = mul(A, D);
    }

    std::vector<MX> dle_in(LR_DLE_NUM_IN);
    dle_in[LR_DLE_A] = A;
    dle_in[LR_DLE_V] = V;
    if (with_C_) dle_in[LR_DLE_C] = C;
    if (with_H_) dle_in[LR_DLE_H] = H;

    f_ = MXFunction(dle_in, lrdleOut("y", with_H_? diagcat(HPH): out));
    f_.init();

    Wrapper<FixedSmithLrDleInternal>::checkDimensions();

  }
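
Note: this is the Smith method with a fixed number of iterations. It accumulates the truncated series P = sum_{i=0..iter-1} D_i V D_i^T with D_i = A^i (or A^i C when an output projection C is present), and directly forms H^T P H when output selectors H are given.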
Example #16
  MX UnaryMX::getBinary(int op, const MX& y, bool scX, bool scY) const {
    switch (op_) {
    case OP_NEG:
      if (op==OP_ADD) return y->getBinary(OP_SUB, dep(), scY, scX);
      else if (op==OP_MUL) return -dep()->getBinary(OP_MUL, y, scX, scY);
      else if (op==OP_DIV) return -dep()->getBinary(OP_DIV, y, scX, scY);
      break;
    case OP_TWICE:
      if (op==OP_SUB && y.isEqual(dep(), maxDepth())) return dep();
      break;
    case OP_SQ:
      if (op==OP_ADD && y.getOp()==OP_SQ) { // sum of squares
        if ((dep().getOp()==OP_SIN && y->dep().getOp()==OP_COS) ||
            (dep().getOp()==OP_COS && y->dep().getOp()==OP_SIN)) { // sin/cos pair
          if (dep()->dep().isEqual(y->dep()->dep(), maxDepth())) { // same argument: sin^2(x)+cos^2(x) == 1
            return MX::ones(y.sparsity());
          }
        }
      }
      break;
    default: break; // no rule
    }

    // Fallback to default implementation
    return MXNode::getBinary(op, y, scX, scY);
  }
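
In plain terms, the peephole rules above are: (-x) + y -> y - x, (-x)*y -> -(x*y) and (-x)/y -> -(x/y) for a negation node; 2x - x -> x for a doubling node; and sin^2(x) + cos^2(x) -> 1 for a squaring node. Anything that does not match falls through to the generic MXNode::getBinary.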
Example #17
 MX GenericCall::projectArg(const MX& x, const Sparsity& sp, int i) {
   if (x.size()==sp.size()) {
     // Insert sparsity projection nodes if needed
     return project(x, sp);
   } else {
     // Different dimensions
     if (x.is_empty() || sp.is_empty()) { // NOTE: Too permissive?
       // Replace nulls with zeros of the right dimension
       return MX::zeros(sp);
     } else if (x.is_scalar()) {
       // Scalar argument means set all
       return MX(sp, x);
     } else if (x.size1()==sp.size2() && x.size2()==sp.size1() && sp.is_vector()) {
       // Transposed vector
       return projectArg(x.T(), sp, i);
     } else {
       // Mismatching dimensions
       casadi_error("Cannot create function call node: Dimension mismatch for argument "
                    << i << ". Argument has shape " << x.size()
                    << " but function input has shape " << sp.size());
     }
   }
 }
Example #18
  void BinaryMX<ScX, ScY>::evalAdj(const std::vector<std::vector<MX> >& aseed,
                                   std::vector<std::vector<MX> >& asens) {
    // Get partial derivatives
    MX pd[2];
    casadi_math<MX>::der(op_, dep(0), dep(1), shared_from_this<MX>(), pd);

    // Propagate adjoint seeds
    for (int d=0; d<aseed.size(); ++d) {
      MX s = aseed[d][0];
      for (int c=0; c<2; ++c) {
        // Get increment of sensitivity c
        MX t = pd[c]*s;

        // If dimension mismatch (i.e. one argument is scalar), then sum all the entries
        if (!t.isscalar() && t.shape() != dep(c).shape()) {
          if (pd[c].shape()!=s.shape()) pd[c] = MX(s.sparsity(), pd[c]);
          t = inner_prod(pd[c], s);
        }

        // Propagate the seeds
        asens[d][c] += t;
      }
    }
  }
Example #19
void EvaluationMX::create(const FX& fcn, const std::vector<MX> &arg,
    std::vector<MX> &res, const std::vector<std::vector<MX> > &fseed,
    std::vector<std::vector<MX> > &fsens,
    const std::vector<std::vector<MX> > &aseed,
    std::vector<std::vector<MX> > &asens, bool output_given) {

  // Number of inputs and outputs
  int num_in = fcn.getNumInputs();
  int num_out = fcn.getNumOutputs();

  // Number of directional derivatives
  int nfdir = fseed.size();
  int nadir = aseed.size();

  // Create the evaluation node
  MX ev;
  if(nfdir>0 || nadir>0){
    // Create derivative function
    Derivative dfcn(fcn,nfdir,nadir);
    stringstream ss;
    ss << "der_" << fcn.getOption("name") << "_" << nfdir << "_" << nadir;
    dfcn.setOption("verbose",fcn.getOption("verbose"));
    dfcn.setOption("name",ss.str());
    dfcn.init();
    
    // All inputs
    vector<MX> darg;
    darg.reserve(num_in*(1+nfdir) + num_out*nadir);
    darg.insert(darg.end(),arg.begin(),arg.end());
    
    // Forward seeds
    for(int dir=0; dir<nfdir; ++dir){
      darg.insert(darg.end(),fseed[dir].begin(),fseed[dir].end());
    }
    
    // Adjoint seeds
    for(int dir=0; dir<nadir; ++dir){
      darg.insert(darg.end(),aseed[dir].begin(),aseed[dir].end());
    }
    
    ev.assignNode(new EvaluationMX(dfcn, darg));
  } else {
    ev.assignNode(new EvaluationMX(fcn, arg));
  }

  // Output index
  int ind = 0;

  // Create the output nodes corresponding to the nondifferentiated function
  res.resize(num_out);
  for (int i = 0; i < num_out; ++i, ++ind) {
    if(!output_given){
      if(!fcn.output(i).empty()){
        res[i].assignNode(new OutputNode(ev, ind));
      } else {
        res[i] = MX();
      }
    }
  }

  // Forward sensitivities
  fsens.resize(nfdir);
  for(int dir = 0; dir < nfdir; ++dir){
    fsens[dir].resize(num_out);
    for (int i = 0; i < num_out; ++i, ++ind) {
      if (!fcn.output(i).empty()){
        fsens[dir][i].assignNode(new OutputNode(ev, ind));
      } else {
        fsens[dir][i] = MX();
      }
    }
  }

  // Adjoint sensitivities
  asens.resize(nadir);
  for (int dir = 0; dir < nadir; ++dir) {
    asens[dir].resize(num_in);
    for (int i = 0; i < num_in; ++i, ++ind) {
      if (!fcn.input(i).empty()) {
        asens[dir][i].assignNode(new OutputNode(ev, ind));
      } else {
        asens[dir][i] = MX();
      }
    }
  }
}
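
Note: all results are packed into the single evaluation node ev, and the running index ind walks its outputs in a fixed layout: first the num_out nondifferentiated outputs, then num_out forward sensitivities for each of the nfdir directions, then num_in adjoint sensitivities for each of the nadir directions.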
Example #20
 Reshape::Reshape(const MX& x, Sparsity sp) {
   casadi_assert(x.size()==sp.size());
   setDependencies(x);
   setSparsity(sp);
 }
Example #21
 InnerProd::InnerProd(const MX& x, const MX& y) {
   casadi_assert(x.sparsity()==y.sparsity());
   setDependencies(x, y);
   setSparsity(Sparsity::scalar());
 }
Example #22
Densification::Densification(const MX& x){
  setDependencies(x);
  setSparsity(CRSSparsity(x.size1(),x.size2(),true));
}
Example #23
  void SimpleIndefDleInternal::init() {

    DleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");


    n_ = A_.size1();

    MX As = MX::sym("A", A_);
    MX Vs = MX::sym("V", V_);
    MX Cs = MX::sym("C", C_);
    MX Hs = MX::sym("H", H_);

    MX Vss = (Vs+Vs.T())/2;
    if (with_C_) Vss = mul(mul(Cs, Vss), Cs.T());

    MX A_total = DMatrix::eye(n_*n_) - kron(As,As);

    // Should be treated by solve node
    MX Pf = solve(A_total, vec(Vss), getOption("linear_solver"));

    std::vector<MX> v_in;
    v_in.push_back(As);
    v_in.push_back(Vs);
    v_in.push_back(Cs);

    MX P = reshape(Pf,n_,n_);

    std::vector<MX> HPH;

    if (with_H_) {
      std::vector<MX> H = horzsplit(Hs,Hi_);

      for (int k=0;k<H.size();++k) {
        HPH.push_back(mul(H[k].T(),mul(P,H[k])));
      }
    }

    std::vector<MX> dle_in(DLE_NUM_IN);
    dle_in[DLE_A] = As;
    dle_in[DLE_V] = Vs;
    if (with_C_) dle_in[DLE_C] = Cs;
    if (with_H_) dle_in[DLE_H] = Hs;

    f_ = MXFunction(dle_in,dleOut("p",with_H_? diagcat(HPH) : P(output().sparsity())));

    f_.init();

    casadi_assert(nOut()==f_.nOut());
    for (int i=0;i<nIn();++i) {
      casadi_assert_message(input(i).sparsity()==f_.input(i).sparsity(),
        "Sparsity mismatch for input " << i << ":" <<
        input(i).dimString() << " <-> " << f_.input(i).dimString() << ".");
    }
    for (int i=0;i<nOut();++i) {
      casadi_assert_message(output(i).sparsity()==f_.output(i).sparsity(),
        "Sparsity mismatch for output " << i << ":" <<
        output(i).dimString() << " <-> " << f_.output(i).dimString() << ".");
    }
  }
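
Note: this solver vectorizes the discrete Lyapunov equation P = A*P*A^T + V. Using vec(A*P*A^T) = kron(A, A)*vec(P), the equation becomes the linear system (I - kron(A, A))*vec(P) = vec(V), which is handed to a generic linear solver. V is symmetrized first, since only its symmetric part contributes to the solution.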
Example #24
  void SdqpToSdp::init() {
    // Initialize the base classes
    SdqpSolverInternal::init();

    cholesky_ = LinearSolver("cholesky", "csparsecholesky", st_[SDQP_STRUCT_H]);

    MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true));
    MX h_socp = MX::sym("h", n_);

    MX f_socp = sqrt(inner_prod(h_socp, h_socp));
    MX en_socp = 0.5/f_socp;

    MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity());
    MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity());

    std::vector<MX> fi;
    fi.reserve(n_+1);
    MX znp = MX(n_+1, n_+1);
    for (int k=0;k<n_;++k) {
      MX gk = vertcat(g_socp(ALL, k), MX(1, 1));
      MX fk = -blockcat(znp, gk, gk.T(), MX(1, 1));
      // TODO(Joel): replace with ALL
      fi.push_back(diagcat(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk));
    }
    MX fin = en_socp*DMatrix::eye(n_+2);
    fin(n_, n_+1) = en_socp;
    fin(n_+1, n_) = en_socp;

    fi.push_back(diagcat(DMatrix(f_sdqp.size1(), f_sdqp.size1()), -fin));

    MX h0 = vertcat(h_socp, DMatrix(1, 1));
    MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp);

    g = diagcat(g_sdqp, g);

    Dict opts;
    opts["input_scheme"] = IOScheme("g_socp", "h_socp", "f_sdqp", "g_sdqp");
    opts["output_scheme"] = IOScheme("f", "g");
    mapping_ = MXFunction("mapping", make_vector(g_socp, h_socp, f_sdqp, g_sdqp),
                          make_vector(horzcat(fi), g), opts);

    Dict options;
    if (hasSetOption(optionsname())) options = getOption(optionsname());
    // Create an SdpSolver instance
    solver_ = SdpSolver("sdpsolver", getOption(solvername()),
                        make_map("g", mapping_.output("g").sparsity(),
                                 "f", mapping_.output("f").sparsity(),
                                 "a", horzcat(input(SDQP_SOLVER_A).sparsity(),
                                              Sparsity(nc_, 1))),
                        options);

    solver_.input(SDP_SOLVER_C).at(n_)=1;

    // Output arguments
    obuf_.resize(SDQP_SOLVER_NUM_OUT);
    output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1);

    std::vector<int> r = range(input(SDQP_SOLVER_G).size1());
    output(SDQP_SOLVER_P) = solver_.output(SDP_SOLVER_P).isempty() ? DMatrix() :
        solver_.output(SDP_SOLVER_P)(r, r);
    output(SDQP_SOLVER_DUAL) = solver_.output(SDP_SOLVER_DUAL).isempty() ? DMatrix() :
        solver_.output(SDP_SOLVER_DUAL)(r, r);
    output(SDQP_SOLVER_COST) = 0.0;
    output(SDQP_SOLVER_DUAL_COST) = 0.0;
    output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
    output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);
  }
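
Note: the quadratic objective is handled through a Cholesky factorization of H together with the second-order-cone term f_socp = ||h_socp||_2; the blockcat calls embed this cone into the semidefinite program via the standard arrow-matrix construction, with f_sdqp and g_sdqp carrying the original SDP data alongside.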
Example #25
 SetNonzeros<Add>::SetNonzeros(const MX& y, const MX& x) {
   this->setSparsity(y.sparsity());
   this->setDependencies(y, x);
 }
Example #26
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");
  
  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");
  
  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2, "Wrong number of input arguments to F for parametric NLP. Must be 2, but got " << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1, "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got " << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2, "Wrong number of input arguments to F. Must be 1 or 2");
  
  if (F_.getNumInputs()==2) parametric_=true;
  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
     "To avoid confusion, the input argument to F must be a vector. You supplied " << F_.input().dimString() << endl <<
     " We suggest you make the following changes:" << endl <<
     "   -  F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl <<
     "             or            F                     ->  F = vec(F) " << endl <<
     "   -  F is an MXFunction:  MXFunction([X],[rhs]) -> " << endl <<
     "                                     X_vec = MX(\"X\",vec(X.sparsity())) " << endl <<
     "                                     F_vec = MXFunction([X_vec],[F.call([X_vec.reshape(X.sparsity())])[0]]) " << endl <<
     "             or            F                     ->  F = vec(F) " << endl <<
     " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl
  );
  
  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_  || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(), "Input argument of F must be dense. You supplied " << F_.input().dimString());
  
  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2, "Wrong number of input arguments to G for parametric NLP. Must be 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1, "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(), "F and G input dimension must match. F " << F_.input().dimString() << ". G " << G_.input().dimString());
  }
  
  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");
    
    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }
  
  
  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");
    
    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }
  
  // Find out if we are to expand the constraint function in terms of scalar operations
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported");
    log("generating hessian");
    
    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();
      
      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();
      
      // Lagrange multipliers
      MX lam("lam",0);
      
      // Objective function scaling
      MX sigma("sigma");
      
      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);
      
      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");
      
    } else { // G_.isNull()
      
      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);
      
      // Efficient if both functions are SXFunction
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expression for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();
        
        // Numeric hessian
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");
        
        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");
        
        // Substitute symbolic variables in f if different input variables from g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }
        
        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");        
        
        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4: 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);
        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();
        
        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");
        
      } else { // !F_sx.isNull() && !G_sx.isNull()
        // Check if the functions are SXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);
        
        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");
          
          // Expression for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();
          
          // Lagrange multipliers
          MX lam("lam",g.size1());
      
          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4:3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam, g));
          lfcn.init();
          log("MX Lagrangian function generated");
          
/*          cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl;*/
      
          bool adjoint_mode = true;
          if(adjoint_mode){
          
            // Gradient of the lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");

            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
//           cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");
            
            // Uni-directional coloring (note, the hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");
            
            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {

            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
            
          }
          log("MX Lagrangian Hessian function generated");
          
        } else {
          casadi_assert_message(0, "Automatic calculation of exact Hessian currently only for F and G both SXFunction or MXFunction ");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()
  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if it does not already exist
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();
    
    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }
    
  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  
  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2, "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got " << H_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1, "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got " << H_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2, "Wrong number of input arguments to J for parametric NLP. Must be 2, but got " << J_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1, "Wrong number of input arguments to J for non-parametric NLP. Must be 1, but got " << J_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();
    
    if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString()
      << " as dimensions, while G has got " << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString()
      << " as dimensions, while H has got " << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString()
      << " as dimensions, while J has got " << J_->input(J_->getNumInputs()-1).dimString());
  }
  
  // Infinity
  double inf = numeric_limits<double>::infinity();
  
  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);
  
  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)      = DMatrix(n_,1,0);
  output(NLP_COST)       = DMatrix(1,1,0);
  output(NLP_LAMBDA_X)   = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G)   = DMatrix(m_,1,0);
  output(NLP_G)          = DMatrix(m_,1,0);
  
  if (hasSetOption("iteration_callback")) {
   callback_ = getOption("iteration_callback");
   if (!callback_.isNull()) {
     if (!callback_.isInit()) callback_.init();
     casadi_assert_message(callback_.getNumOutputs()==1, "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.output(0).size()==1, "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT, "Callback function should have the output scheme of NLPSolver as input scheme, i.e. " << NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided.");
     for (int i=0;i<NLP_NUM_OUT;i++) {
       casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
         "Callback function should have the output scheme of NLPSolver as input scheme. " << 
         "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) <<  " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be " << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << "."
       );
       callback_.input(i).setAll(0);
     }
   }
  }
  
  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
Example #27
  void LiftingLrDpleInternal::init() {

    form_ = getOptionEnumValue("form");

    // Initialize the base classes
    LrDpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    // We will construct an MXFunction to facilitate the calculation of derivatives

    MX As = MX::sym("As", input(LR_DLE_A).sparsity());
    MX Vs = MX::sym("Vs", input(LR_DLE_V).sparsity());
    MX Cs = MX::sym("Cs", input(LR_DLE_C).sparsity());
    MX Hs = MX::sym("Hs", input(LR_DLE_H).sparsity());

    n_ = A_[0].size1();

    // Chop-up the arguments
    std::vector<MX> As_ = horzsplit(As, n_);
    std::vector<MX> Vs_ = horzsplit(Vs, V_[0].size2());
    std::vector<MX> Cs_ = horzsplit(Cs, V_[0].size2());
    std::vector<MX> Hs_;
    if (with_H_) {
      Hs_ = horzsplit(Hs, Hsi_);
    }

    MX A;
    if (K_==1) {
      A = As;
    } else {
      if (form_==0) {
        MX AL = diagcat(vector_slice(As_, range(As_.size()-1)));

        MX AL2 = horzcat(AL, MX::sparse(AL.size1(), As_[0].size2()));
        MX AT = horzcat(MX::sparse(As_[0].size1(), AL.size2()), As_.back());
        A = vertcat(AT, AL2);
      } else {
        MX AL = diagcat(reverse(vector_slice(As_, range(As_.size()-1))));

        MX AL2 = horzcat(MX::sparse(AL.size1(), As_[0].size2()), AL);
        MX AT = horzcat(As_.back(), MX::sparse(As_[0].size1(), AL.size2()));
        A = vertcat(AL2, AT);
      }
    }

    MX V;
    MX C;

    MX H;

    if (form_==0) {
      V = diagcat(Vs_.back(), diagcat(vector_slice(Vs_, range(Vs_.size()-1))));
      if (with_C_) {
        C = diagcat(Cs_.back(), diagcat(vector_slice(Cs_, range(Cs_.size()-1))));
      }
    } else {
      V = diagcat(diagcat(reverse(vector_slice(Vs_, range(Vs_.size()-1)))), Vs_.back());
      if (with_C_) {
        C = diagcat(diagcat(reverse(vector_slice(Cs_, range(Cs_.size()-1)))), Cs_.back());
      }
    }

    if (with_H_) {
      H = diagcat(form_==0? Hs_ : reverse(Hs_));
    }

    // Create an LrDleSolver instance
    solver_ = LrDleSolver(getOption(solvername()),
                          lrdleStruct("a", A.sparsity(),
                                      "v", V.sparsity(),
                                      "c", C.sparsity(),
                                      "h", H.sparsity()));
    solver_.setOption("Hs", Hss_);
    if (hasSetOption(optionsname())) solver_.setOption(getOption(optionsname()));
    solver_.init();

    std::vector<MX> v_in(LR_DPLE_NUM_IN);
    v_in[LR_DLE_A] = As;
    v_in[LR_DLE_V] = Vs;
    if (with_C_) {
      v_in[LR_DLE_C] = Cs;
    }
    if (with_H_) {
      v_in[LR_DLE_H] = Hs;
    }

    std::vector<MX> Pr = solver_.call(lrdpleIn("a", A, "v", V, "c", C, "h", H));

    MX Pf = Pr[0];

    std::vector<MX> Ps = with_H_ ? diagsplit(Pf, Hsi_) : diagsplit(Pf, n_);

    if (form_==1) {
      Ps = reverse(Ps);
    }

    f_ = MXFunction(v_in, dpleOut("p", horzcat(Ps)));
    f_.setInputScheme(SCHEME_LR_DPLEInput);
    f_.setOutputScheme(SCHEME_LR_DPLEOutput);
    f_.init();

    Wrapper::checkDimensions();

  }
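
Note: the lifting trick turns the K-periodic DPLE into one large DLE. The A_k blocks are arranged in a block-cyclic matrix A (form_ selects one of two orderings), V, C and H are stacked block-diagonally, a single LrDleSolver call produces the lifted solution, and diagsplit recovers the K periodic blocks (reversed again when form_==1).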
Example #28
  Function implicitRK(Function& f, const std::string& impl, const Dictionary& impl_options,
                      const MX& tf, int order, const std::string& scheme, int ne) {
    casadi_assert_message(ne>=1, "Parameter ne (number of elements) must be at least 1, "
                          "but got " << ne << ".");
    casadi_assert_message(order==4, "Only RK order 4 is supported now.");
    casadi_assert_message(f.getNumInputs()==DAE_NUM_IN && f.getNumOutputs()==DAE_NUM_OUT,
                          "Supplied function must adhere to dae scheme.");
    casadi_assert_message(f.output(DAE_QUAD).isEmpty(),
                          "Supplied function cannot have quadrature states.");

    // Obtain collocation points
    std::vector<double> tau_root = collocationPoints(order, scheme);

    // Retrieve collocation interpolating matrices
    std::vector < std::vector <double> > C;
    std::vector < double > D;
    collocationInterpolators(tau_root, C, D);

    // Retrieve problem dimensions
    int nx = f.input(DAE_X).size();
    int nz = f.input(DAE_Z).size();
    int np = f.input(DAE_P).size();

    // Variables for one finite element
    MX X = MX::sym("X", nx);
    MX P = MX::sym("P", np);
    MX V = MX::sym("V", order*(nx+nz)); // Unknowns

    MX X0 = X;

    // Components of the unknowns that correspond to states at collocation points
    std::vector<MX> Xc;Xc.reserve(order);
    Xc.push_back(X0);

    // Components of the unknowns that correspond to algebraic states at collocation points
    std::vector<MX> Zc;Zc.reserve(order);

    // Splitting the unknowns
    std::vector<int> splitPositions = range(0, order*nx, nx);
    if (nz>0) {
      std::vector<int> Zc_pos = range(order*nx, order*nx+(order+1)*nz, nz);
      splitPositions.insert(splitPositions.end(), Zc_pos.begin(), Zc_pos.end());
    } else {
      splitPositions.push_back(order*nx);
    }
    std::vector<MX> Vs = vertsplit(V, splitPositions);

    // Extracting unknowns from Z
    for (int i=0;i<order;++i) {
      Xc.push_back(X0+Vs[i]);
    }
    if (nz>0) {
      for (int i=0;i<order;++i) {
        Zc.push_back(Vs[order+i]);
      }
    }

    // Get the collocation Equations (that define V)
    std::vector<MX> V_eq;

    // Local start time
    MX t0_l=MX::sym("t0");
    MX h = MX::sym("h");

    for (int j=1;j<order+1;++j) {
      // Expression for the state derivative at the collocation point
      MX xp_j = 0;
      for (int r=0;r<order+1;++r) {
        xp_j+= C[j][r]*Xc[r];
      }
      // Append collocation equations & algebraic constraints
      std::vector<MX> f_out;
      MX t_l = t0_l+tau_root[j]*h;
      if (nz>0) {
        f_out = f.call(daeIn("t", t_l, "x", Xc[j], "p", P, "z", Zc[j-1]));
      } else {
        f_out = f.call(daeIn("t", t_l, "x", Xc[j], "p", P));
      }
      V_eq.push_back(h*f_out[DAE_ODE]-xp_j);
      V_eq.push_back(f_out[DAE_ALG]);

    }

    // Root-finding function, implicitly defines V as a function of X0 and P
    std::vector<MX> vfcn_inputs;
    vfcn_inputs.push_back(V);
    vfcn_inputs.push_back(X);
    vfcn_inputs.push_back(P);
    vfcn_inputs.push_back(t0_l);
    vfcn_inputs.push_back(h);

    Function vfcn = MXFunction(vfcn_inputs, vertcat(V_eq));
    vfcn.init();

    try {
      // Attempt to convert to SXFunction to decrease overhead
      vfcn = SXFunction(vfcn);
      vfcn.init();
    } catch(CasadiException& e) {
      // Conversion to SXFunction failed; keep the MXFunction
    }

    // Create an implicit function instance to solve the system of equations
    ImplicitFunction ifcn(impl, vfcn, Function(), LinearSolver());
    ifcn.setOption(impl_options);
    ifcn.init();

    // Get an expression for the state at the end of the finite element
    std::vector<MX> ifcn_call_in(5);
    ifcn_call_in[0] = MX::zeros(V.sparsity());
    std::copy(vfcn_inputs.begin()+1, vfcn_inputs.end(), ifcn_call_in.begin()+1);
    std::vector<MX> ifcn_call_out = ifcn.call(ifcn_call_in, true);
    Vs = vertsplit(ifcn_call_out[0], splitPositions);

    MX XF = 0;
    for (int i=0;i<order+1;++i) {
      XF += D[i]*(i==0? X : X + Vs[i-1]);
    }


    // Get the discrete time dynamics
    ifcn_call_in.erase(ifcn_call_in.begin());
    MXFunction F = MXFunction(ifcn_call_in, XF);
    F.init();

    // Loop over all finite elements
    MX h_ = tf/ne;
    MX t0_ = 0;

    for (int i=0;i<ne;++i) {
      std::vector<MX> F_in;
      F_in.push_back(X);
      F_in.push_back(P);
      F_in.push_back(t0_);
      F_in.push_back(h_);
      t0_+= h_;
      std::vector<MX> F_out = F.call(F_in);
      X = F_out[0];
    }

    // Create a return function with Integrator signature
    MXFunction ret = MXFunction(integratorIn("x0", X0, "p", P), integratorOut("xf", X));
    ret.init();

    return ret;

  }
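
A hypothetical call site for implicitRK (a sketch only: the scalar ODE, the "kinsol" implicit-solver name and the empty option dictionary are assumptions, not taken from the code above):

  // Integrate xdot = -x over tf = 1.0: RK order 4, Legendre points, 10 elements
  SX x = SX::sym("x");
  SXFunction dae(daeIn("x", x), daeOut("ode", -x));
  dae.init();
  Dictionary impl_options;  // defaults for the implicit solver, assumed empty
  Function F = implicitRK(dae, "kinsol", impl_options, MX(1.0), 4, "legendre", 10);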
Example #29
int main(){

  // Horizon length
  double tf = 3.0;
  
  // Number of subintervals
  int n = 30;
  
  // Time step
  MX dt = tf/n;
  
  // Parameter (should be treated as such)
  double x0 = 0.02;
  
  // Control
  MX u = msym("u",n);
  vector<double> lbu(n, -1.0);
  vector<double> ubu(n,  1.0);

  // Objective function terms
  vector<MX> ff;
    
  // Add control regularization
  ff.push_back(u);

  // Constraints
  vector<MX> gg;
  vector<double> lbg, ubg;

  // Lifting modes
  enum LiftMode{UNLIFTED, AUT_INIT, ZERO_INIT};
  LiftMode mode = ZERO_INIT;

  // Perform lifted single-shooting
  MX x = x0;
  for(int k=0; k<n; ++k){
    // Integrate
    x = x + dt*(x*(x+1) + u[k]);

    // Lift the state
    switch(mode){
    case AUT_INIT: x.lift(x); break;
    case ZERO_INIT: x.lift(0.); break;
    case UNLIFTED: break;
    }

    // Objective function terms
    ff.push_back(x);

    // State bounds
    gg.push_back(x);
    if(k==n-1){
      lbg.push_back( 0.0);
      ubg.push_back( 0.0);
    } else {
      lbg.push_back(-1.0);
      ubg.push_back( 1.0);
    }
  }

  // Gather least square terms and constraints
  MX f = vertcat(ff);
  MX g = vertcat(gg);
    
  // Use Gauss-Newton?
  bool gauss_newton = true;
  if(!gauss_newton){
    f = inner_prod(f,f)/2;
  }

  // Form the NLP
  MXFunction nlp(nlpIn("x",u),nlpOut("f",f,"g",g));
  SCPgen solver(nlp);

  //solver.setOption("verbose",true);
  solver.setOption("regularize",false);
  solver.setOption("codegen",false);
  solver.setOption("max_iter_ls",1);
  solver.setOption("max_iter",100);
  if(gauss_newton){
    solver.setOption("hessian_approximation","gauss-newton");
  }
  
  // Print the variables
  solver.setOption("print_x",range(0,n,5));

  Dictionary qp_solver_options;
  if(false){
    solver.setOption("qp_solver",NLPQpSolver::creator);
    qp_solver_options["nlp_solver"] = IpoptSolver::creator;
    Dictionary nlp_solver_options;
    nlp_solver_options["tol"] = 1e-12;
    nlp_solver_options["print_level"] = 0;
    nlp_solver_options["print_time"] = false;
    qp_solver_options["nlp_solver_options"] = nlp_solver_options;
      
  } else {
    solver.setOption("qp_solver",QPOasesSolver::creator);
    qp_solver_options["printLevel"] = "none";
  }
  solver.setOption("qp_solver_options",qp_solver_options);

  solver.init();
    
  // Pass bounds and solve
  solver.setInput(lbu,"lbx");
  solver.setInput(ubu,"ubx");
  solver.setInput(lbg,"lbg");
  solver.setInput(ubg,"ubg");
  solver.solve();

  cout << "u_opt = " << solver.output(NLP_SOLVER_X).data() << endl;

    
  return 0;
}
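
Note: in this lifted single-shooting formulation, x.lift(v) marks the intermediate state as an extra degree of freedom for the SCPgen solver, with the argument supplying its initial guess: ZERO_INIT starts every lifted state at 0, AUT_INIT initializes it from the expression itself, and UNLIFTED keeps plain single shooting.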
Example #30
  void SDPSDQPInternal::init() {
    // Initialize the base classes
    SdqpSolverInternal::init();

    cholesky_ = LinearSolver("csparsecholesky", st_[SDQP_STRUCT_H]);
    cholesky_.init();

    MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true));
    MX h_socp = MX::sym("h", n_);

    MX f_socp = sqrt(inner_prod(h_socp, h_socp));
    MX en_socp = 0.5/f_socp;

    MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity());
    MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity());

    std::vector<MX> fi;
    fi.reserve(n_+1);
    MX znp = MX::sparse(n_+1, n_+1);
    for (int k=0;k<n_;++k) {
      MX gk = vertcat(g_socp(ALL, k), DMatrix::sparse(1, 1));
      MX fk = -blockcat(znp, gk, gk.T(), DMatrix::sparse(1, 1));
      // TODO(Joel): replace with ALL
      fi.push_back(blkdiag(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk));
    }
    MX fin = en_socp*DMatrix::eye(n_+2);
    fin(n_, n_+1) = en_socp;
    fin(n_+1, n_) = en_socp;

    fi.push_back(blkdiag(DMatrix::sparse(f_sdqp.size1(), f_sdqp.size1()), -fin));

    MX h0 = vertcat(h_socp, DMatrix::sparse(1, 1));
    MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp);

    g = blkdiag(g_sdqp, g);

    IOScheme mappingIn("g_socp", "h_socp", "f_sdqp", "g_sdqp");
    IOScheme mappingOut("f", "g");

    mapping_ = MXFunction(mappingIn("g_socp", g_socp, "h_socp", h_socp,
                                    "f_sdqp", f_sdqp, "g_sdqp", g_sdqp),
                          mappingOut("f", horzcat(fi), "g", g));
    mapping_.init();

    // Create an SdpSolver instance
    std::string sdpsolver_name = getOption("sdp_solver");
    sdpsolver_ = SdpSolver(sdpsolver_name,
                           sdpStruct("g", mapping_.output("g").sparsity(),
                                     "f", mapping_.output("f").sparsity(),
                                     "a", horzcat(input(SDQP_SOLVER_A).sparsity(),
                                                  Sparsity::sparse(nc_, 1))));

    if (hasSetOption("sdp_solver_options")) {
      sdpsolver_.setOption(getOption("sdp_solver_options"));
    }

    // Initialize the SDP solver
    sdpsolver_.init();

    sdpsolver_.input(SDP_SOLVER_C).at(n_)=1;

    // Output arguments
    setNumOutputs(SDQP_SOLVER_NUM_OUT);
    output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1);

    std::vector<int> r = range(input(SDQP_SOLVER_G).size1());
    output(SDQP_SOLVER_P) = sdpsolver_.output(SDP_SOLVER_P).isEmpty() ? DMatrix() :
        sdpsolver_.output(SDP_SOLVER_P)(r, r);
    output(SDQP_SOLVER_DUAL) = sdpsolver_.output(SDP_SOLVER_DUAL).isEmpty() ? DMatrix() :
        sdpsolver_.output(SDP_SOLVER_DUAL)(r, r);
    output(SDQP_SOLVER_COST) = 0.0;
    output(SDQP_SOLVER_DUAL_COST) = 0.0;
    output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
    output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);
  }