void CondensingIndefDpleInternal::init() {
    // Initialize the base classes
    DpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    n_ = A_[0].size1();


    MX As = MX::sym("A", horzcat(A_));
    MX Vs = MX::sym("V", horzcat(V_));

    std::vector< MX > Vss = horzsplit(Vs, n_);
    std::vector< MX > Ass = horzsplit(As, n_);

    for (int k=0;k<K_;++k) {
      Vss[k] = (Vss[k]+Vss[k].T())/2;
    }

    MX R = MX::zeros(n_, n_);

    for (int k=0;k<K_;++k) {
      R = mul(mul(Ass[k], R), Ass[k].T()) + Vss[k];
    }

    std::vector< MX > Assr(K_);
    std::reverse_copy(Ass.begin(), Ass.end(), Assr.begin());

    MX Ap = mul(Assr);
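    // Note: the recursion above condenses the K coupled equations
    // P_{k+1} = A_k P_k A_k^T + V_k into a single discrete Lyapunov equation:
    // with the monodromy matrix Ap = A_{K-1}*...*A_1*A_0 and R the noise
    // accumulated over one period, P_0 satisfies P_0 = Ap P_0 Ap^T + R.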

    // Create a DleSolver instance
    solver_ = DleSolver(getOption(solvername()), dleStruct("a", Ap.sparsity(), "v", R.sparsity()));
    solver_.setOption(getOption(optionsname()));

    // Initialize the DLE solver
    solver_.init();

    std::vector<MX> Pr = solver_.call(dpleIn("a", Ap, "v", R));

    std::vector<MX> Ps(K_);
    Ps[0] = Pr[0];
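    // The loop below propagates P_0 forward through one period to recover
    // P_1, ..., P_{K-1}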

    for (int k=0;k<K_-1;++k) {
      Ps[k+1] = mul(mul(Ass[k], Ps[k]), Ass[k].T()) + Vss[k];
    }

    f_ = MXFunction(dpleIn("a", As, "v", Vs), dpleOut("p", horzcat(Ps)));
    f_.init();

    Wrapper::checkDimensions();

  }
  void SimpleIndefDpleInternal::init() {

    DpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    n_ = A_[0].size1();


    MX As = MX::sym("A", n_, K_*n_);
    MX Vs = MX::sym("V", n_, K_*n_);

    std::vector< MX > Vss = horzsplit(Vs, n_);
    std::vector< MX > Ass = horzsplit(As, n_);

    for (int k=0;k<K_;++k) {
      Vss[k]=(Vss[k]+Vss[k].T())/2;
    }

    std::vector< MX > AA_list(K_);
    for (int k=0;k<K_;++k) {
      AA_list[k] = kron(Ass[k], Ass[k]);
    }

    MX AA = blkdiag(AA_list);

    MX A_total = DMatrix::eye(n_*n_*K_) -
        vertcat(AA(range(K_*n_*n_-n_*n_, K_*n_*n_), range(K_*n_*n_)),
                AA(range(K_*n_*n_-n_*n_), range(K_*n_*n_)));
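    // Since vec(A_k P_k A_k^T) = (A_k (x) A_k) vec(P_k), the K coupled equations form
    // one sparse linear system: the block rows of the block-diagonal AA are cyclically
    // shifted down by one block (so that equation k references P_{k-1 mod K}) and
    // subtracted from the identity. The right-hand side is assembled from the
    // correspondingly shifted V_k below.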

    std::vector<MX> Vss_shift;
    Vss_shift.push_back(Vss.back());
    Vss_shift.insert(Vss_shift.end(), Vss.begin(), Vss.begin()+K_-1);

    MX Pf = solve(A_total, vec(horzcat(Vss_shift)), getOption("linear_solver"));
    MX P = reshape(Pf, n_, K_*n_);

    std::vector<MX> v_in;
    v_in.push_back(As);
    v_in.push_back(Vs);
    f_ = MXFunction(v_in, P);
    f_.setInputScheme(SCHEME_DPLEInput);
    f_.setOutputScheme(SCHEME_DPLEOutput);
    f_.init();
  }
// Example 3
  void Horzsplit::evalFwd(const std::vector<cpv_MX>& fwdSeed, const std::vector<pv_MX>& fwdSens) {
    int nfwd = fwdSens.size();
    int nx = offset_.size()-1;

    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    // Forward sensitivities
    for (int d=0; d<nfwd; ++d) {
      const cpv_MX& arg = fwdSeed[d];
      const pv_MX& res = fwdSens[d];
      const MX& x = *arg[0];
      vector<MX> y = horzsplit(x, col_offset);
      for (int i=0; i<nx; ++i) {
        if (res[i]!=0) {
          *res[i] = y[i];
        }
      }
    }
  }
// Example 4
 std::vector<Sparsity> vertsplit(const Sparsity& sp, const std::vector<int>& offset){
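   // Reuse horzsplit on the transposed pattern, then transpose each piece back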
   std::vector<Sparsity> ret = horzsplit(sp.T(),offset);
   for(std::vector<Sparsity>::iterator it=ret.begin(); it!=ret.end(); ++it){
     *it = it->T();
   }
   return ret;
 }
// Example 5
 HorzRepsum::HorzRepsum(const MX& x, int n) : n_(n) {
   casadi_assert(x.size2() % n == 0);
   std::vector<Sparsity> sp = horzsplit(x.sparsity(), x.size2()/n);
   Sparsity block = sp[0];
   for (int i=1;i<sp.size();++i) {
     block = block+sp[i];
   }
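   // block is the union of the sparsity patterns of the n horizontal blocks;
   // projecting x onto n copies of it aligns the nonzeros so the blocks can be
   // summed entrywise when this node is evaluated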
   Sparsity goal = repmat(block, 1, n);
   setDependencies(project(x, goal));
   setSparsity(block);
 }
// Example 6
  void LrDpleToDple::init() {
    // Initialize the base classes
    LrDpleInternal::init();

    MX As = MX::sym("As", input(LR_DPLE_A).sparsity());
    MX Vs = MX::sym("Vs", input(LR_DPLE_V).sparsity());
    MX Cs = MX::sym("Cs", input(LR_DPLE_C).sparsity());
    MX Hs = MX::sym("Hs", input(LR_DPLE_H).sparsity());

    int n_ = A_[0].size1();

    // Chop up the arguments
    std::vector<MX> As_ = horzsplit(As, n_);
    std::vector<MX> Vs_ = horzsplit(Vs, V_[0].size2());
    std::vector<MX> Cs_ = horzsplit(Cs, V_[0].size2());
    std::vector<MX> Hss_ = horzsplit(Hs, Hsi_);

    std::vector<MX> V_(Vs_.size());

    for (int k=0;k<V_.size();++k) {
      V_[k] = mul(Cs_[k], mul(Vs_[k], Cs_[k].T()));
    }
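    // V_ now holds the expanded noise terms C_k * V_k * C_k^T; with these the
    // low-rank DPLE reduces to an ordinary (dense-V) DPLE for the wrapped solver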

    std::vector<Sparsity> Vsp(Vs_.size());
    for (int k=0;k<V_.size();++k) {
      Vsp[k] = V_[k].sparsity();
    }

    // Solver options
    Dict options;
    if (hasSetOption(optionsname())) {
      options = getOption(optionsname());
    }

    // Create a DpleSolver instance
    std::map<std::string, std::vector<Sparsity> > tmp;
    tmp["a"] = A_;
    tmp["v"] = Vsp;
    solver_ = DpleSolver("solver", getOption(solvername()), tmp, options);

    MX P = solver_(make_map("a", horzcat(As_), "v", horzcat(V_))).at("p");
    std::vector<MX> Ps_ = horzsplit(P, n_);

    std::vector<MX> HPH(K_);

    for (int k=0;k<K_;++k) {
      std::vector<MX> hph = horzsplit(Hss_[k], cumsum0(Hs_[k]));

      for (int kk=0;kk<hph.size();++kk) {
        hph[kk] = mul(hph[kk].T(), mul(Ps_[k], hph[kk]));
      }
      HPH[k] = diagcat(hph);
    }


    f_ = MXFunction(name_, lrdpleIn("a", As, "v", Vs, "c", Cs, "h", Hs),
                    lrdpleOut("y", horzcat(HPH)));

    Wrapper<LrDpleToDple>::checkDimensions();
  }
// Example 7
  void Horzsplit::eval_mx(const std::vector<MX>& arg, std::vector<MX>& res) {
    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    res = horzsplit(arg[0], col_offset);
  }
// Example 8
  Horzsplit::Horzsplit(const MX& x, const std::vector<int>& offset) : Split(x, offset) {

    // Split up the sparsity pattern
    output_sparsity_ = horzsplit(x.sparsity(), offset_);

    // Have offset_ refer to the nonzero offsets instead of column offsets
    offset_.resize(1);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      offset_.push_back(offset_.back() + it->nnz());
    }
  }
  void FixedSmithLrDleInternal::init() {
    iter_  = getOption("iter");

    LrDleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");

    MX H = MX::sym("H", H_);
    MX A = MX::sym("A", A_);
    MX C = MX::sym("C", C_);
    MX V = MX::sym("V", V_);

    MX Vs = (V+V.T())/2;

    MX D = with_C_ ? C : DMatrix::eye(A_.size1());


    std::vector<MX> HPH(Hs_.size(), 0);
    std::vector<MX> Hs = with_H_? horzsplit(H, Hi_) : std::vector<MX>();
    MX out = 0;
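    // Truncated Smith iteration: the DLE solution P = sum_{i>=0} A^i (C V C^T) (A^i)^T
    // is approximated by its first iter_ terms. D holds A^i*C (or A^i when no C is
    // given); when outputs H are present, the projections H_k^T P H_k are accumulated
    // directly instead of forming P.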

    for (int i=0;i<iter_;++i) {
      if (with_H_) {
        for (int k=0;k<Hs.size();++k) {
          MX v = mul(D.T(), Hs[k]);
          HPH[k]+= mul(v.T(), mul(Vs, v));
        }
      } else {
        out += mul(D, mul(Vs, D.T()));
      }
      D = mul(A, D);
    }

    std::vector<MX> dle_in(LR_DLE_NUM_IN);
    dle_in[LR_DLE_A] = A;
    dle_in[LR_DLE_V] = V;
    if (with_C_) dle_in[LR_DLE_C] = C;
    if (with_H_) dle_in[LR_DLE_H] = H;

    f_ = MXFunction(dle_in, lrdleOut("y", with_H_? diagcat(HPH): out));
    f_.init();

    Wrapper<FixedSmithLrDleInternal>::checkDimensions();

  }
// Example 10
  void Horzsplit::evalFwd(const std::vector<std::vector<MX> >& fseed,
                          std::vector<std::vector<MX> >& fsens) {
    int nfwd = fsens.size();

    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    // Forward sensitivities
    for (int d=0; d<nfwd; ++d) {
      fsens[d] = horzsplit(fseed[d][0], col_offset);
    }
  }
// Example 11
  void Horzsplit::eval(const cpv_MX& arg, const pv_MX& res) {
    int nx = offset_.size()-1;

    // Get column offsets
    vector<int> col_offset;
    col_offset.reserve(offset_.size());
    col_offset.push_back(0);
    for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
        it!=output_sparsity_.end();
        ++it) {
      col_offset.push_back(col_offset.back() + it->size2());
    }

    const MX& x = *arg[0];
    vector<MX> y = horzsplit(x, col_offset);
    for (int i=0; i<nx; ++i) {
      if (res[i]!=0) {
        *res[i] = y[i];
      }
    }
  }
// Example 12
  void Horzcat::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                           MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                           bool output_given) {
    int nfwd = fwdSens.size();
    int nadj = adjSeed.size();

    // Non-differentiated output
    if (!output_given) {
      *output[0] = horzcat(getVector(input));
    }

    // Forward sensitivities
    for (int d = 0; d<nfwd; ++d) {
      *fwdSens[d][0] = horzcat(getVector(fwdSeed[d]));
    }

    // Quick return?
    if (nadj==0) return;

    // Get offsets for each column
    vector<int> col_offset(ndep()+1, 0);
    for (int i=0; i<ndep(); ++i) {
      int ncol = dep(i).sparsity().size2();
      col_offset[i+1] = col_offset[i] + ncol;
    }

    // Adjoint sensitivities
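    // (the adjoint of a horzcat is a horzsplit of the seed at the dependency
    //  column boundaries; each piece is accumulated into the matching input)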
    for (int d=0; d<nadj; ++d) {
      MX& aseed = *adjSeed[d][0];
      vector<MX> s = horzsplit(aseed, col_offset);
      aseed = MX();
      for (int i=0; i<ndep(); ++i) {
        adjSens[d][i]->addToSum(s[i]);
      }
    }
  }
  void LiftingLrDpleInternal::init() {

    form_ = getOptionEnumValue("form");

    // Initialize the base classes
    LrDpleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");
    casadi_assert_message(const_dim_,
      "const_dim option set to False: Solver only handles the True case.");

    // We will construct an MXFunction to facilitate the calculation of derivatives

    MX As = MX::sym("As", input(LR_DLE_A).sparsity());
    MX Vs = MX::sym("Vs", input(LR_DLE_V).sparsity());
    MX Cs = MX::sym("Cs", input(LR_DLE_C).sparsity());
    MX Hs = MX::sym("Hs", input(LR_DLE_H).sparsity());

    n_ = A_[0].size1();

    // Chop up the arguments
    std::vector<MX> As_ = horzsplit(As, n_);
    std::vector<MX> Vs_ = horzsplit(Vs, V_[0].size2());
    std::vector<MX> Cs_ = horzsplit(Cs, V_[0].size2());
    std::vector<MX> Hs_;
    if (with_H_) {
      Hs_ = horzsplit(Hs, Hsi_);
    }

    MX A;
    if (K_==1) {
      A = As;
    } else {
      if (form_==0) {
        MX AL = diagcat(vector_slice(As_, range(As_.size()-1)));

        MX AL2 = horzcat(AL, MX::sparse(AL.size1(), As_[0].size2()));
        MX AT = horzcat(MX::sparse(As_[0].size1(), AL.size2()), As_.back());
        A = vertcat(AT, AL2);
      } else {
        MX AL = diagcat(reverse(vector_slice(As_, range(As_.size()-1))));

        MX AL2 = horzcat(MX::sparse(AL.size1(), As_[0].size2()), AL);
        MX AT = horzcat(As_.back(), MX::sparse(As_[0].size1(), AL.size2()));
        A = vertcat(AL2, AT);
      }
    }
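    // A is now the lifted system matrix: a block-cyclic matrix carrying the A_k on an
    // off-diagonal block band, with A_{K-1} closing the cycle, so the K coupled periodic
    // equations collapse into a single low-rank DLE of dimension K*n_; form_ only
    // selects the block ordering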

    MX V;
    MX C;

    MX H;

    if (form_==0) {
      V = diagcat(Vs_.back(), diagcat(vector_slice(Vs_, range(Vs_.size()-1))));
      if (with_C_) {
        C = diagcat(Cs_.back(), diagcat(vector_slice(Cs_, range(Cs_.size()-1))));
      }
    } else {
      V = diagcat(diagcat(reverse(vector_slice(Vs_, range(Vs_.size()-1)))), Vs_.back());
      if (with_C_) {
        C = diagcat(diagcat(reverse(vector_slice(Cs_, range(Cs_.size()-1)))), Cs_.back());
      }
    }

    if (with_H_) {
      H = diagcat(form_==0? Hs_ : reverse(Hs_));
    }

    // Create an LrDleSolver instance
    solver_ = LrDleSolver(getOption(solvername()),
                          lrdleStruct("a", A.sparsity(),
                                      "v", V.sparsity(),
                                      "c", C.sparsity(),
                                      "h", H.sparsity()));
    solver_.setOption("Hs", Hss_);
    if (hasSetOption(optionsname())) solver_.setOption(getOption(optionsname()));
    solver_.init();

    std::vector<MX> v_in(LR_DPLE_NUM_IN);
    v_in[LR_DLE_A] = As;
    v_in[LR_DLE_V] = Vs;
    if (with_C_) {
      v_in[LR_DLE_C] = Cs;
    }
    if (with_H_) {
      v_in[LR_DLE_H] = Hs;
    }

    std::vector<MX> Pr = solver_.call(lrdpleIn("a", A, "v", V, "c", C, "h", H));

    MX Pf = Pr[0];

    std::vector<MX> Ps = with_H_ ? diagsplit(Pf, Hsi_) : diagsplit(Pf, n_);

    if (form_==1) {
      Ps = reverse(Ps);
    }

    f_ = MXFunction(v_in, dpleOut("p", horzcat(Ps)));
    f_.setInputScheme(SCHEME_LR_DPLEInput);
    f_.setOutputScheme(SCHEME_LR_DPLEOutput);
    f_.init();

    Wrapper::checkDimensions();

  }
  void SimpleIndefDleInternal::init() {

    DleInternal::init();

    casadi_assert_message(!pos_def_,
      "pos_def option set to True: Solver only handles the indefinite case.");


    n_ = A_.size1();

    MX As = MX::sym("A", A_);
    MX Vs = MX::sym("V", V_);
    MX Cs = MX::sym("C", C_);
    MX Hs = MX::sym("H", H_);

    MX Vss = (Vs+Vs.T())/2;
    if (with_C_) Vss = mul(mul(Cs, Vss), Cs.T());

    MX A_total = DMatrix::eye(n_*n_) - kron(As,As);
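    // Since vec(A P A^T) = (A (x) A) vec(P), the DLE P = A P A^T + V is equivalent to
    // the linear system (I - A (x) A) vec(P) = vec(V); A_total is that system matrix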

    // Should be treated by solve node
    MX Pf = solve(A_total, vec(Vss), getOption("linear_solver"));

    std::vector<MX> v_in;
    v_in.push_back(As);
    v_in.push_back(Vs);
    v_in.push_back(Cs);

    MX P = reshape(Pf,n_,n_);

    std::vector<MX> HPH;

    if (with_H_) {
      std::vector<MX> H = horzsplit(Hs,Hi_);

      for (int k=0;k<H.size();++k) {
        HPH.push_back(mul(H[k].T(),mul(P,H[k])));
      }
    }

    std::vector<MX> dle_in(DLE_NUM_IN);
    dle_in[DLE_A] = As;
    dle_in[DLE_V] = Vs;
    if (with_C_) dle_in[DLE_C] = Cs;
    if (with_H_) dle_in[DLE_H] = Hs;

    f_ = MXFunction(dle_in,dleOut("p",with_H_? diagcat(HPH) : P(output().sparsity())));

    f_.init();

    casadi_assert(nOut()==f_.nOut());
    for (int i=0;i<nIn();++i) {
      casadi_assert_message(input(i).sparsity()==f_.input(i).sparsity(),
        "Sparsity mismatch for input " << i << ":" <<
        input(i).dimString() << " <-> " << f_.input(i).dimString() << ".");
    }
    for (int i=0;i<nOut();++i) {
      casadi_assert_message(output(i).sparsity()==f_.output(i).sparsity(),
        "Sparsity mismatch for output " << i << ":" <<
        output(i).dimString() << " <-> " << f_.output(i).dimString() << ".");
    }
  }
  void ImplicitFunctionInternal::evaluateMX(
      MXNode* node, const MXPtrV& arg, MXPtrV& res,
      const MXPtrVV& fseed, MXPtrVV& fsens, const MXPtrVV& aseed, MXPtrVV& asens,
      bool output_given) {
    // Evaluate non-differentiated
    vector<MX> argv = MXNode::getVector(arg);
    MX z; // the solution to the system of equations
    if (output_given) {
      z = *res[iout_];
    } else {
      vector<MX> resv = callSelf(argv);
      for (int i=0; i<resv.size(); ++i) {
        if (res[i]!=0) *res[i] = resv[i];
      }
      z = resv[iout_];
    }

    // Quick return if no derivatives
    int nfwd = fsens.size();
    int nadj = aseed.size();
    if (nfwd==0 && nadj==0) return;
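    // For the residual g(z, x) = 0 defining z(x), the implicit function theorem gives
    // dz = -J^{-1} dg with J = dg/dz: forward sensitivities need a linear solve with J
    // and adjoint sensitivities one with its transpose. Both cases are batched below by
    // horzcat-ing the right-hand sides so that one solve call handles all directions.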

    // Temporaries
    vector<int> col_offset(1, 0);
    vector<MX> rhs;
    vector<int> rhs_loc;

    // Arguments when calling f/f_der
    vector<MX> v;
    v.reserve(getNumInputs()*(1+nfwd) + nadj);
    v.insert(v.end(), argv.begin(), argv.end());
    v[iin_] = z;

    // Get an expression for the Jacobian
    MX J = jac_.call(v).front();

    // Directional derivatives of f
    Function f_der = f_.derivative(nfwd, nadj);

    // Forward sensitivities, collect arguments for calling f_der
    for (int d=0; d<nfwd; ++d) {
      argv = MXNode::getVector(fseed[d]);
      argv[iin_] = MX::zeros(input(iin_).sparsity());
      v.insert(v.end(), argv.begin(), argv.end());
    }

    // Adjoint sensitivities, solve to get arguments for calling f_der
    if (nadj>0) {
      for (int d=0; d<nadj; ++d) {
        for (int i=0; i<getNumOutputs(); ++i) {
          if (aseed[d][i]!=0) {
            if (i==iout_) {
              rhs.push_back(*aseed[d][i]);
              col_offset.push_back(col_offset.back()+1);
              rhs_loc.push_back(v.size()); // where to store it
              v.push_back(MX());
            } else {
              v.push_back(*aseed[d][i]);
            }
          }
          *aseed[d][i] = MX();
        }
      }

      // Solve for all right-hand-sides at once
      rhs = horzsplit(J->getSolve(horzcat(rhs), true, linsol_), col_offset);
      for (int d=0; d<rhs.size(); ++d) {
        v[rhs_loc[d]] = rhs[d];
      }
      col_offset.resize(1);
      rhs.clear();
    }

    // Propagate through the implicit function
    v = f_der.call(v);
    vector<MX>::const_iterator v_it = v.begin();

    // Discard non-differentiated evaluation (change?)
    v_it += getNumOutputs();

    // Forward directional derivatives
    if (nfwd>0) {
      for (int d=0; d<nfwd; ++d) {
        for (int i=0; i<getNumOutputs(); ++i) {
          if (i==iout_) {
            // Collect the arguments
            rhs.push_back(*v_it++);
            col_offset.push_back(col_offset.back()+1);
          } else {
            // Auxiliary output
            if (fsens[d][i]!=0) {
              *fsens[d][i] = *v_it++;
            }
          }
        }
      }

      // Solve for all the forward derivatives at once
      rhs = horzsplit(J->getSolve(horzcat(rhs), false, linsol_), col_offset);
      for (int d=0; d<nfwd; ++d) {
        if (fsens[d][iout_]!=0) {
          *fsens[d][iout_] = -rhs[d];
        }
      }

      col_offset.resize(1);
      rhs.clear();
    }

    // Collect adjoint sensitivities
    for (int d=0; d<nadj; ++d) {
      for (int i=0; i<asens[d].size(); ++i, ++v_it) {
        if (i!=iin_ && asens[d][i]!=0 && !v_it->isNull()) {
          *asens[d][i] += - *v_it;
        }
      }
    }
    casadi_assert(v_it==v.end());
  }