void SymbolicQr::evaluateSXGen(const SXPtrV& input, SXPtrV& output, bool tr) {
  // Get arguments
  casadi_assert(input.at(0)!=0);
  SX r = *input.at(0);
  casadi_assert(input.at(1)!=0);
  SX A = *input.at(1);

  // Number of right hand sides
  int nrhs = r.size2();

  // Factorize A
  vector<SX> v = fact_fcn_(A);

  // Select solve function
  Function& solv = tr ? solv_fcn_T_ : solv_fcn_N_;

  // Solve for every right hand side
  vector<SX> resv;
  v.resize(3);
  for (int i=0; i<nrhs; ++i) {
    v[2] = r(Slice(), i);
    resv.push_back(solv(v).at(0));
  }

  // Collect the solutions
  casadi_assert(output[0]!=0);
  *output.at(0) = horzcat(resv);
}
bool SX::isEqual(const SX& ex, int depth) const {
  if (node==ex.get()) {
    return true;
  } else if (depth>0) {
    return node->isEqual(ex.get(), depth);
  } else {
    return false;
  }
}
SX MultipleShooting::getOutput(string o) {
  SX ret = ssym(o, N);
  for (int k=0; k<N; k++)
    ret.at(k) = getOutput(o, k);
  return ret;
}
/** \brief Create a binary expression */
inline static SX create(unsigned char op, const SX& dep0, const SX& dep1) {
  if (dep0.isConstant() && dep1.isConstant()) {
    // Evaluate constant
    double dep0_val = dep0.getValue();
    double dep1_val = dep1.getValue();
    double ret_val;
    casadi_math<double>::fun(op, dep0_val, dep1_val, ret_val);
    return ret_val;
  } else {
    // Expression containing free variables
    return SX::create(new BinarySX(op, dep0, dep1));
  }
}
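A minimal usage sketch (not from the original source) of the constant-folding branch above, assuming the scalar SX API used elsewhere in these snippets:

// Adding two constant scalars dispatches through BinarySX::create(OP_ADD, ...),
// which folds the result instead of allocating a binary node (illustrative only).
SX a = 2, b = 3;
SX c = a + b;                   // constant-folded at construction time
bool folded = c.isConstant();   // true: c simply holds the value 5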
bool SX::isEquivalent(const SX& y, int depth) const {
  if (isEqual(y)) return true;
  if (isConstant() && y.isConstant()) return y.getValue()==getValue();
  if (depth==0) return false;

  if (hasDep() && y.hasDep() && getOp()==y.getOp()) {
    if (getDep(0).isEquivalent(y.getDep(0), depth-1)
        && getDep(1).isEquivalent(y.getDep(1), depth-1)) return true;
    return (operation_checker<CommChecker>(getOp())
            && getDep(0).isEquivalent(y.getDep(1), depth-1)
            && getDep(1).isEquivalent(y.getDep(0), depth-1));
  }
  return false;
}
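A small sketch (not from the original source) of how the depth-limited, commutativity-aware check behaves, assuming the scalar SX API used in these snippets:

SX x("x"), y("y");
SX e1 = x + y;
SX e2 = y + x;
bool same_node  = e1.isEqual(e2);          // false: distinct expression nodes
bool equivalent = e1.isEquivalent(e2, 1);  // true: OP_ADD is commutative, operands match after swapping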
void QpToNlp::init(const Dict& opts) {
  // Initialize the base classes
  Qpsol::init(opts);

  // Default options
  string nlpsol_plugin;
  Dict nlpsol_options;

  // Read user options
  for (auto&& op : opts) {
    if (op.first=="nlpsol") {
      nlpsol_plugin = op.second.to_string();
    } else if (op.first=="nlpsol_options") {
      nlpsol_options = op.second;
    }
  }

  // Create a symbolic matrix for the decision variables
  SX X = SX::sym("X", n_, 1);

  // Parameters to the problem
  SX H = SX::sym("H", sparsity_in(QPSOL_H));
  SX G = SX::sym("G", sparsity_in(QPSOL_G));
  SX A = SX::sym("A", sparsity_in(QPSOL_A));

  // Put parameters in a vector
  std::vector<SX> par;
  par.push_back(H.nonzeros());
  par.push_back(G.nonzeros());
  par.push_back(A.nonzeros());

  // Formulate the QP as an NLP: minimize G'X + 1/2 X'HX subject to bounds on AX
  SXDict nlp = {{"x", X}, {"p", vertcat(par)},
                {"f", mtimes(G.T(), X) + 0.5*mtimes(mtimes(X.T(), H), X)},
                {"g", mtimes(A, X)}};

  // Create an Nlpsol instance
  casadi_assert_message(!nlpsol_plugin.empty(), "'nlpsol' option has not been set");
  solver_ = nlpsol("nlpsol", nlpsol_plugin, nlp, nlpsol_options);
  alloc(solver_);

  // Allocate storage for NLP solver parameters
  alloc_w(solver_.nnz_in(NLPSOL_P), true);
}
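For illustration, a self-contained user-level sketch of the same reformulation: a small QP written down directly as an NLP and handed to an NLP solver. The problem data (H = 2*I, g = (-2, -6), one linear constraint row) and the choice of the "ipopt" plugin are made-up assumptions, not part of QpToNlp.

#include <casadi/casadi.hpp>
#include <iostream>
using namespace casadi;

int main() {
  SX x = SX::sym("x", 2);
  // f = g'x + 1/2 x'Hx with H = 2*I and g = (-2, -6); g(x) = x0 + x1 is the single constraint row
  SX f = x(0)*x(0) + x(1)*x(1) - 2*x(0) - 6*x(1);
  SX g = x(0) + x(1);
  SXDict nlp = {{"x", x}, {"f", f}, {"g", g}};

  // Solve the QP by calling an NLP solver, mirroring what QpToNlp does internally
  Function solver = nlpsol("solver", "ipopt", nlp);
  std::map<std::string, DM> arg, res;
  arg["x0"] = 0;
  arg["lbg"] = -inf;
  arg["ubg"] = 2;
  res = solver(arg);
  std::cout << "x_opt = " << res.at("x") << std::endl;
  return 0;
}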
SX SXFunctionInternal::hess(int iind, int oind) {
  casadi_assert_message(output(oind).numel() == 1, "Function must be scalar");

  SX g = grad(iind, oind);
  g.makeDense();
  if (verbose()) userOut() << "SXFunctionInternal::hess: calculating gradient done " << endl;

  // Create function
  Dict opts;
  opts["verbose"] = getOption("verbose");
  SXFunction gfcn("gfcn", make_vector(inputv_.at(iind)), make_vector(g), opts);

  // Calculate Jacobian of the gradient
  if (verbose()) {
    userOut() << "SXFunctionInternal::hess: calculating Jacobian " << endl;
  }
  SX ret = gfcn.jac(0, 0, false, true);
  if (verbose()) {
    userOut() << "SXFunctionInternal::hess: calculating Jacobian done" << endl;
  }

  // Return Jacobian of the gradient
  return ret;
}
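A brief sketch of the same idea, namely that the Hessian of a scalar expression is the Jacobian of its gradient. This uses the present-day user-level API rather than the internal code above, so the names here are assumptions for illustration:

SX x = SX::sym("x", 2);
SX f = x(0)*x(0)*x(1) + x(1)*x(1);
SX grad_f = SX::gradient(f, x);        // 2x1 gradient of the scalar f
SX hess_f = SX::jacobian(grad_f, x);   // 2x2 Hessian, computed as the Jacobian of the gradient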
int main(){ cout << "program started" << endl; // Dimensions int nu = 20; // Number of control segments int nj = 100; // Number of integration steps per control segment // optimization variable SX u = SX::sym("u", nu); // control SX s_0 = 0; // initial position SX v_0 = 0; // initial speed SX m_0 = 1; // initial mass SX dt = 10.0/(nj*nu); // time step SX alpha = 0.05; // friction SX beta = 0.1; // fuel consumption rate // Trajectory SX s_traj = SX::zeros(nu); SX v_traj = SX::zeros(nu); SX m_traj = SX::zeros(nu); // Integrate over the interval with Euler forward SX s = s_0, v = v_0, m = m_0; for(int k=0; k<nu; ++k){ for(int j=0; j<nj; ++j){ s += dt*v; v += dt / m * (u[k]- alpha * v*v); m += -dt * beta*u[k]*u[k]; } s_traj[k] = s; v_traj[k] = v; m_traj[k] = m; } // Objective function SX f = inner_prod(u, u); // Terminal constraints SX g; g.append(s); g.append(v); g.append(v_traj); // Create the NLP SXFunction nlp("nlp", nlpIn("x", u), nlpOut("f", f, "g", g)); // Allocate an NLP solver and buffers NlpSolver solver("solver", "ipopt", nlp); std::map<std::string, DMatrix> arg, res; // Bounds on u and initial condition arg["lbx"] = -10; arg["ubx"] = 10; arg["x0"] = 0.4; // Bounds on g vector<double> gmin(2), gmax(2); gmin[0] = gmax[0] = 10; gmin[1] = gmax[1] = 0; gmin.resize(2+nu, -numeric_limits<double>::infinity()); gmax.resize(2+nu, 1.1); arg["lbg"] = gmin; arg["ubg"] = gmax; // Solve the problem res = solver(arg); // Print the optimal cost double cost(res.at("f")); cout << "optimal cost: " << cost << endl; // Print the optimal solution vector<double> uopt(res.at("x")); cout << "optimal control: " << uopt << endl; // Get the state trajectory vector<double> sopt(nu), vopt(nu), mopt(nu); SXFunction xfcn("xfcn", make_vector(u), make_vector(s_traj, v_traj, m_traj)); assign_vector(sopt, vopt, mopt, xfcn(make_vector(res.at("x")))); cout << "position: " << sopt << endl; cout << "velocity: " << vopt << endl; cout << "mass: " << mopt << endl; // Create Matlab script to plot the solution ofstream file; string filename = "rocket_ipopt_results.m"; file.open(filename.c_str()); file << "% Results file from " __FILE__ << endl; file << "% Generated " __DATE__ " at " __TIME__ << endl; file << endl; file << "cost = " << cost << ";" << endl; file << "u = " << uopt << ";" << endl; // Save results to file file << "t = linspace(0,10.0," << nu << ");"<< endl; file << "s = " << sopt << ";" << endl; file << "v = " << vopt << ";" << endl; file << "m = " << mopt << ";" << endl; // Finalize the results file file << endl; file << "% Plot the results" << endl; file << "figure(1);" << endl; file << "clf;" << endl << endl; file << "subplot(2,2,1);" << endl; file << "plot(t,s);" << endl; file << "grid on;" << endl; file << "xlabel('time [s]');" << endl; file << "ylabel('position [m]');" << endl << endl; file << "subplot(2,2,2);" << endl; file << "plot(t,v);" << endl; file << "grid on;" << endl; file << "xlabel('time [s]');" << endl; file << "ylabel('velocity [m/s]');" << endl << endl; file << "subplot(2,2,3);" << endl; file << "plot(t,m);" << endl; file << "grid on;" << endl; file << "xlabel('time [s]');" << endl; file << "ylabel('mass [kg]');" << endl << endl; file << "subplot(2,2,4);" << endl; file << "plot(t,u);" << endl; file << "grid on;" << endl; file << "xlabel('time [s]');" << endl; file << "ylabel('Thrust [kg m/s^2]');" << endl << endl; file.close(); cout << "Results saved to \"" << filename << "\"" << endl; return 0; }
void Sqpmethod::init() { // Call the init method of the base class NlpSolverInternal::init(); // Read options max_iter_ = getOption("max_iter"); max_iter_ls_ = getOption("max_iter_ls"); c1_ = getOption("c1"); beta_ = getOption("beta"); merit_memsize_ = getOption("merit_memory"); lbfgs_memory_ = getOption("lbfgs_memory"); tol_pr_ = getOption("tol_pr"); tol_du_ = getOption("tol_du"); regularize_ = getOption("regularize"); exact_hessian_ = getOption("hessian_approximation")=="exact"; min_step_size_ = getOption("min_step_size"); // Get/generate required functions gradF(); jacG(); if (exact_hessian_) { hessLag(); } // Allocate a QP solver Sparsity H_sparsity = exact_hessian_ ? hessLag().output().sparsity() : Sparsity::dense(nx_, nx_); H_sparsity = H_sparsity + Sparsity::diag(nx_); Sparsity A_sparsity = jacG().isNull() ? Sparsity(0, nx_) : jacG().output().sparsity(); // QP solver options Dict qp_solver_options; if (hasSetOption("qp_solver_options")) { qp_solver_options = getOption("qp_solver_options"); } // Allocate a QP solver qp_solver_ = QpSolver("qp_solver", getOption("qp_solver"), make_map("h", H_sparsity, "a", A_sparsity), qp_solver_options); // Lagrange multipliers of the NLP mu_.resize(ng_); mu_x_.resize(nx_); // Lagrange gradient in the next iterate gLag_.resize(nx_); gLag_old_.resize(nx_); // Current linearization point x_.resize(nx_); x_cand_.resize(nx_); x_old_.resize(nx_); // Constraint function value gk_.resize(ng_); gk_cand_.resize(ng_); // Hessian approximation Bk_ = DMatrix::zeros(H_sparsity); // Jacobian Jk_ = DMatrix::zeros(A_sparsity); // Bounds of the QP qp_LBA_.resize(ng_); qp_UBA_.resize(ng_); qp_LBX_.resize(nx_); qp_UBX_.resize(nx_); // QP solution dx_.resize(nx_); qp_DUAL_X_.resize(nx_); qp_DUAL_A_.resize(ng_); // Gradient of the objective gf_.resize(nx_); // Create Hessian update function if (!exact_hessian_) { // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old SX Bk = SX::sym("Bk", H_sparsity); SX x = SX::sym("x", input(NLP_SOLVER_X0).sparsity()); SX x_old = SX::sym("x", x.sparsity()); SX gLag = SX::sym("gLag", x.sparsity()); SX gLag_old = SX::sym("gLag_old", x.sparsity()); SX sk = x - x_old; SX yk = gLag - gLag_old; SX qk = mul(Bk, sk); // Calculating theta SX skBksk = inner_prod(sk, qk); SX omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk), 0.8 * skBksk / (skBksk - inner_prod(sk, yk)), 1); yk = omega * yk + (1 - omega) * qk; SX theta = 1. / inner_prod(sk, yk); SX phi = 1. / inner_prod(qk, sk); SX Bk_new = Bk + theta * mul(yk, yk.T()) - phi * mul(qk, qk.T()); // Inputs of the BFGS update function vector<SX> bfgs_in(BFGS_NUM_IN); bfgs_in[BFGS_BK] = Bk; bfgs_in[BFGS_X] = x; bfgs_in[BFGS_X_OLD] = x_old; bfgs_in[BFGS_GLAG] = gLag; bfgs_in[BFGS_GLAG_OLD] = gLag_old; bfgs_ = SXFunction("bfgs", bfgs_in, make_vector(Bk_new)); // Initial Hessian approximation B_init_ = DMatrix::eye(nx_); } // Header if (static_cast<bool>(getOption("print_header"))) { userOut() << "-------------------------------------------" << endl << "This is casadi::SQPMethod." << endl; if (exact_hessian_) { userOut() << "Using exact Hessian" << endl; } else { userOut() << "Using limited memory BFGS Hessian approximation" << endl; } userOut() << endl << "Number of variables: " << setw(9) << nx_ << endl << "Number of constraints: " << setw(9) << ng_ << endl << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.nnz() << endl << "Number of nonzeros in Lagrangian Hessian: " << setw(9) << H_sparsity.nnz() << endl << endl; } }
int main(){ cout << "program started" << endl; std::ofstream resfile; resfile.open ("results_biegler_10_1.txt"); // Test with different number of elements for(int N=1; N<=10; ++N){ // Degree of interpolating polynomial int K = 2; // Legrandre roots vector<double> tau_root(K+1); tau_root[0] = 0.; tau_root[1] = 0.211325; tau_root[2] = 0.788675; // Radau roots (K=3) /* tau_root[0] = 0; tau_root[1] = 0.155051; tau_root[2] = 0.644949; tau_root[3] = 1;*/ // Time SX t("t"); // Differential equation SX z("z"); SXFunction F(z,z*z - 2*z + 1); F.setOption("name","dz/dt"); F.init(); cout << F << endl; double z0 = -3; // Analytic solution SXFunction z_analytic(t, (4*t-3)/(3*t+1)); z_analytic.setOption("name","analytic solution"); z_analytic.init(); cout << z_analytic << endl; // Collocation point SX tau("tau"); // Step size double h = 1.0/N; // Lagrange polynomials vector<SXFunction> l(K+1); for(int j=0; j<=K; ++j){ SX L = 1; for(int k=0; k<=K; ++k) if(k != j) L *= (tau-tau_root[k])/(tau_root[j]-tau_root[k]); l[j] = SXFunction(tau,L); stringstream ss; ss << "l(" << j << ")"; l[j].setOption("name",ss.str()); l[j].init(); cout << l[j] << endl; } // Get the coefficients of the continuity equation vector<double> D(K+1); for(int j=0; j<=K; ++j){ l[j].setInput(1.0); l[j].evaluate(); l[j].getOutput(D[j]); } cout << "D = " << D << endl; // Get the coefficients of the collocation equation vector<vector<double> > C(K+1); for(int j=0; j<=K; ++j){ C[j].resize(K+1); for(int k=0; k<=K; ++k){ l[j].setInput(tau_root[k]); l[j].setFwdSeed(1.0); l[j].evaluate(1,0); l[j].getFwdSens(C[j][k]); } } cout << "C = " << C << endl; // Collocated states SX Z = ssym("Z",N,K+1); // State at final time // SX ZF("ZF"); // All variables SX x; x << vec(trans(Z)); // x << vec(ZF); cout << "x = " << x << endl; // Construct the "NLP" SX g; for(int i=0; i<N; ++i){ for(int k=1; k<=K; ++k){ // Add collocation equations to NLP SX rhs = 0; for(int j=0; j<=K; ++j) rhs += Z(i,j)*C[j][k]; g << (h*F.eval(SX(Z(i,k))) - rhs); } // Add continuity equation to NLP SX rhs = 0; for(int j=0; j<=K; ++j) rhs += D[j]*Z(i,j); if(i<N-1) g << (SX(Z(i+1,0)) - rhs); /* else g << (ZF - rhs);*/ } cout << "g = " << g << endl; SXFunction gfcn(x,g); // Dummy objective function SXFunction obj(x, Z(0,0)*Z(0,0)); // ---- // SOLVE THE NLP // ---- // Allocate an NLP solver IpoptSolver solver(obj,gfcn); // Set options solver.setOption("tol",1e-10); solver.setOption("hessian_approximation","limited-memory"); // pass_nonlinear_variables // initialize the solver solver.init(); // Initial condition vector<double> xinit(x.numel(),0); solver.setInput(xinit,"x0"); // Bounds on x vector<double> lbx(x.numel(),-100); vector<double> ubx(x.numel(), 100); lbx[0] = ubx[0] = z0; solver.setInput(lbx,"lbx"); solver.setInput(ubx,"ubx"); // Bounds on the constraints vector<double> lubg(g.numel(),0); solver.setInput(lubg,"lbg"); solver.setInput(lubg,"ubg"); // Solve the problem solver.solve(); // Print the time points vector<double> t_opt(N*(K+1)+1); for(int i=0; i<N; ++i) for(int j=0; j<=K; ++j) t_opt[j + (K+1)*i] = h*(i + tau_root[j]); t_opt.back() = 1; cout << "time points: " << t_opt << endl; resfile << t_opt << endl; // Print the optimal cost cout << "optimal cost: " << solver.output(NLP_SOLVER_F) << endl; // Print the optimal solution vector<double> xopt(x.numel()); solver.getOutput(xopt,"x"); cout << "optimal solution: " << xopt << endl; resfile << xopt << endl; } resfile.close(); return 0; }
DM value(const SX& x, const std::vector<MX>& values=std::vector<MX>()) const { return DM::nan(x.sparsity()); }
int main(){ cout << "program started" << endl; // Dimensions int nk = 100; // Number of control segments int nj = 100; // Number of integration steps per control segment // Control SX u = ssym("u",nk); // control // Number of states int nx = 3; // Intermediate variables with initial values and bounds SX v, v_def; DMatrix v_init, v_min, v_max; // Initial values and bounds for the state at the different stages DMatrix x_k_init = DMatrix::zeros(nx); DMatrix x_k_min = -DMatrix::inf(nx); DMatrix x_k_max = DMatrix::inf(nx); // Initial conditions DMatrix x_0 = DMatrix::zeros(nx); x_0[0] = 0; // x x_0[1] = 1; // y x_0[2] = 0; // lterm double tf = 10; SX dt = tf/(nj*nk); // time step // For all the shooting intervals SX x_k = x_0; SX ode_rhs(x_k.sparsity(),0); for(int k=0; k<nk; ++k){ // Get control SX u_k = u[k].at(0); // Integrate over the interval with Euler forward for(int j=0; j<nj; ++j){ // ODE right hand side ode_rhs[0] = (1 - x_k[1]*x_k[1])*x_k[0] - x_k[1] + u_k; ode_rhs[1] = x_k[0]; ode_rhs[2] = x_k[0]*x_k[0] + x_k[1]*x_k[1]; // Take a step x_k += dt*ode_rhs; } // Lift x v_def.append(x_k); v_init.append(x_k_init); v_min.append(x_k_min); v_max.append(x_k_max); // Allocate intermediate variables stringstream ss; ss << "v_" << k; x_k = ssym(ss.str(),nx); v.append(x_k); } // Objective function SX f = x_k[2] + (tf/nk)*inner_prod(u,u); // Terminal constraints SX g; g.append(x_k[0]); g.append(x_k[1]); // Bounds on g DMatrix g_min = DMatrix::zeros(2); DMatrix g_max = DMatrix::zeros(2); // Bounds on u and initial condition DMatrix u_min = -0.75*DMatrix::ones(nk); DMatrix u_max = 1.00*DMatrix::ones(nk); DMatrix u_init = DMatrix::zeros(nk); DMatrix xv_min = vertcat(u_min,v_min); DMatrix xv_max = vertcat(u_max,v_max); DMatrix xv_init = vertcat(u_init,v_init); DMatrix gv_min = vertcat(DMatrix::zeros(v.size()),g_min); DMatrix gv_max = vertcat(DMatrix::zeros(v.size()),g_max); // Formulate the full-space NLP SXFunction ffcn(vertcat(u,v),f); SXFunction gfcn(vertcat(u,v),vertcat(v_def-v,g)); Dictionary qp_solver_options; qp_solver_options["printLevel"] = "none"; // Solve using multiple NLP solvers enum Tests{IPOPT, LIFTED_SQP, FULLSPACE_SQP, OLD_SQP_METHOD, NUM_TESTS}; for(int test=0; test<NUM_TESTS; ++test){ // Get the nlp solver and NLP solver options NLPSolver nlp_solver; switch(test){ case IPOPT: cout << "Testing IPOPT" << endl; nlp_solver = IpoptSolver(ffcn,gfcn); nlp_solver.setOption("generate_hessian",true); nlp_solver.setOption("tol",1e-10); break; case LIFTED_SQP: cout << "Testing lifted SQP" << endl; nlp_solver = LiftedSQP(ffcn,gfcn); nlp_solver.setOption("qp_solver",QPOasesSolver::creator); nlp_solver.setOption("qp_solver_options",qp_solver_options); nlp_solver.setOption("num_lifted",v.size()); nlp_solver.setOption("toldx",1e-10); nlp_solver.setOption("verbose",true); break; case FULLSPACE_SQP: cout << "Testing fullspace SQP" << endl; nlp_solver = LiftedSQP(ffcn,gfcn); nlp_solver.setOption("qp_solver",QPOasesSolver::creator); nlp_solver.setOption("qp_solver_options",qp_solver_options); nlp_solver.setOption("num_lifted",0); nlp_solver.setOption("toldx",1e-10); nlp_solver.setOption("verbose",true); break; case OLD_SQP_METHOD: cout << "Testing old SQP method" << endl; nlp_solver = SQPMethod(ffcn,gfcn); nlp_solver.setOption("qp_solver",QPOasesSolver::creator); nlp_solver.setOption("qp_solver_options",qp_solver_options); nlp_solver.setOption("generate_hessian",true); } // initialize the solver nlp_solver.init(); // Initial guess and bounds nlp_solver.setInput(xv_min,"lbx"); 
nlp_solver.setInput(xv_max,"ubx"); nlp_solver.setInput(xv_init,"x0"); nlp_solver.setInput(gv_min,"lbg"); nlp_solver.setInput(gv_max,"ubg"); // Solve the problem nlp_solver.solve(); // Print the optimal solution // cout << "optimal cost: " << nlp_solver.output(NLP_SOLVER_F).toScalar() << endl; // cout << "optimal control: " << nlp_solver.output(NLP_SOLVER_X) << endl; // cout << "multipliers (u): " << nlp_solver.output(NLP_SOLVER_LAM_X) << endl; // cout << "multipliers (gb): " << nlp_solver.output(NLP_SOLVER_LAM_G) << endl; } return 0; }
void fill(SXMatrix& mat, const SX& val) {
  if (val->isZero()) mat.makeEmpty(mat.size1(), mat.size2());
  else               mat.makeDense(mat.size1(), mat.size2(), val);
}
void AmplInterface::init(const Dict& opts) {
  // Call the init method of the base class
  Nlpsol::init(opts);

  // Set default options
  solver_ = "ipopt";

  // Read user options
  for (auto&& op : opts) {
    if (op.first=="solver") {
      solver_ = op.second.to_string();
    }
  }

  // Extract the expressions
  casadi_assert(oracle().is_a("SXFunction"), "Only SX supported currently.");
  vector<SX> xp = oracle().sx_in();
  vector<SX> fg = oracle()(xp);

  // Get x, p, f and g
  SX x = xp.at(NL_X);
  SX p = xp.at(NL_P);
  SX f = fg.at(NL_F);
  SX g = fg.at(NL_G);
  casadi_assert(p.is_empty(), "'p' currently not supported");

  // Names of the variables, constraints
  vector<string> x_name, g_name;
  for (casadi_int i=0; i<nx_; ++i) x_name.push_back("x[" + str(i) + "]");
  for (casadi_int i=0; i<ng_; ++i) g_name.push_back("g[" + str(i) + "]");
  casadi_int max_x_name = x_name.back().size();
  casadi_int max_g_name = g_name.empty() ? 0 : g_name.back().size();

  // Calculate the Jacobian, gradient
  Sparsity jac_g = SX::jacobian(g, x).sparsity();
  Sparsity jac_f = SX::jacobian(f, x).sparsity();

  // Extract the shared subexpressions
  vector<SX> ex = {f, g}, v, vdef;
  shared(ex, v, vdef);
  f = ex[0];
  g = ex[1];

  // Header
  nl_init_ << "g3 1 1 0\n";

  // Type of constraints
  nl_init_ << nx_ << " "   // number of variables
           << ng_ << " "   // number of constraints
           << 1 << " "     // number of objectives
           << 0 << " "     // number of ranges
           << 0 << " "     // ?
           << 0 << "\n";   // number of logical constraints

  // Nonlinearity - assume all nonlinear for now TODO: Detect
  nl_init_ << ng_ << " "   // nonlinear constraints
           << 1 << "\n";   // nonlinear objectives

  // Network constraints
  nl_init_ << 0 << " "     // nonlinear
           << 0 << "\n";   // linear

  // Nonlinear variables
  nl_init_ << nx_ << " "   // in constraints
           << nx_ << " "   // in objectives
           << nx_ << "\n"; // in both

  // Linear network ..
  nl_init_ << 0 << " "     // .. variables ..
           << 0 << " "     // .. arith ..
           << 0 << " "     // .. functions ..
           << 0 << "\n";   // .. flags

  // Discrete variables
  nl_init_ << 0 << " "     // binary
           << 0 << " "     // integer
           << 0 << " "     // nonlinear in both
           << 0 << " "     // nonlinear in constraints
           << 0 << "\n";   // nonlinear in objective

  // Nonzeros in the Jacobian, gradients
  nl_init_ << jac_g.nnz() << " "    // nnz in Jacobian
           << jac_f.nnz() << "\n";  // nnz in gradients

  // Maximum name length
  nl_init_ << max_x_name << " "     // constraints
           << max_g_name << "\n";   // variables

  // Shared subexpressions
  nl_init_ << v.size() << " "  // both
           << 0 << " "         // constraints
           << 0 << " "         // objective
           << 0 << " "         // c1 - constraint, but linear?
           << 0 << "\n";       // o1 - objective, but linear?
  // Create a function which evaluates f and g
  Function F("F", {vertcat(v), x}, {vertcat(vdef), f, g},
             {"v", "x"}, {"vdef", "f", "g"});

  // Work vector holding one NL-format expression string per algorithm result
  vector<string> work(F.sz_w());

  // Loop over the algorithm
  for (casadi_int k=0; k<F.n_instructions(); ++k) {
    // Get the atomic operation
    casadi_int op = F.instruction_id(k);

    // Get the operation indices
    std::vector<casadi_int> o = F.instruction_output(k);
    casadi_int o0=-1, o1=-1, i0=-1, i1=-1;
    if (o.size()>0) o0 = o[0];
    if (o.size()>1) o1 = o[1];
    std::vector<casadi_int> i = F.instruction_input(k);
    if (i.size()>0) i0 = i[0];
    if (i.size()>1) i1 = i[1];

    switch (op) {
      case OP_CONST:
        work[o0] = "n" + str(F.instruction_constant(k)) + "\n";
        break;
      case OP_INPUT:
        work[o0] = "v" + str(i0*v.size() + i1) + "\n";
        break;
      case OP_OUTPUT:
        if (o0==0) {
          // Common subexpression
          nl_init_ << "V" << (x.nnz()+o1) << " 0 0\n" << work[i0];
        } else if (o0==1) {
          // Nonlinear objective term
          nl_init_ << "O" << o1 << " 0\n" << work[i0];
        } else {
          // Nonlinear constraint term
          nl_init_ << "C" << o1 << "\n" << work[i0];
        }
        break;
      case OP_ADD: work[o0] = "o0\n" + work[i0] + work[i1]; break;
      case OP_SUB: work[o0] = "o1\n" + work[i0] + work[i1]; break;
      case OP_MUL: work[o0] = "o2\n" + work[i0] + work[i1]; break;
      case OP_DIV: work[o0] = "o3\n" + work[i0] + work[i1]; break;
      case OP_SQ: work[o0] = "o5\n" + work[i0] + "n2\n"; break;
      case OP_POW: work[o0] = "o5\n" + work[i0] + work[i1]; break;
      case OP_FLOOR: work[o0] = "o13\n" + work[i0]; break;
      case OP_CEIL: work[o0] = "o14\n" + work[i0]; break;
      case OP_FABS: work[o0] = "o15\n" + work[i0]; break;
      case OP_NEG: work[o0] = "o16\n" + work[i0]; break;
      case OP_TANH: work[o0] = "o37\n" + work[i0]; break;
      case OP_TAN: work[o0] = "o38\n" + work[i0]; break;
      case OP_SQRT: work[o0] = "o39\n" + work[i0]; break;
      case OP_SINH: work[o0] = "o40\n" + work[i0]; break;
      case OP_SIN: work[o0] = "o41\n" + work[i0]; break;
      case OP_LOG: work[o0] = "o43\n" + work[i0]; break;
      case OP_EXP: work[o0] = "o44\n" + work[i0]; break;
      case OP_COSH: work[o0] = "o45\n" + work[i0]; break;
      case OP_COS: work[o0] = "o46\n" + work[i0]; break;
      case OP_ATANH: work[o0] = "o47\n" + work[i0]; break;
      case OP_ATAN2: work[o0] = "o48\n" + work[i0] + work[i1]; break;
      case OP_ATAN: work[o0] = "o49\n" + work[i0]; break;
      case OP_ASINH: work[o0] = "o50\n" + work[i0]; break;
      case OP_ASIN: work[o0] = "o51\n" + work[i0]; break;
      case OP_ACOSH: work[o0] = "o52\n" + work[i0]; break;
      case OP_ACOS: work[o0] = "o53\n" + work[i0]; break;
      default:
        if (casadi_math<double>::ndeps(op)==1) {
          casadi_error(casadi_math<double>::print(op, "x") + " not supported");
        } else {
          casadi_error(casadi_math<double>::print(op, "x", "y") + " not supported");
        }
    }
  }

  // k segments, cumulative column count in jac_g
  const casadi_int *colind = jac_g.colind(), *row = jac_g.row();
  nl_init_ << "k" << (nx_-1) << "\n";
  for (casadi_int i=1; i<nx_; ++i) nl_init_ << colind[i] << "\n";

  // J segments, rows in jac_g
  Sparsity sp = jac_g.T();
  colind = sp.colind(), row = sp.row();
  for (casadi_int i=0; i<ng_; ++i) {
    nl_init_ << "J" << i << " " << (colind[i+1]-colind[i]) << "\n";
    for (casadi_int k=colind[i]; k<colind[i+1]; ++k) {
      casadi_int r=row[k];
      nl_init_ << r << " " << 0 << "\n"; // no linear term
    }
  }

  // G segments, rows in jac_f
  sp = jac_f.T();
  colind = sp.colind(), row = sp.row();
  nl_init_ << "G" << 0 << " " << (colind[0+1]-colind[0]) << "\n";
  for (casadi_int k=colind[0]; k<colind[0+1]; ++k) {
    casadi_int r=row[k];
    nl_init_ << r << " " << 0 << "\n"; // no linear term
  }
}
void dxdt(map<string,SX> &xDot, map<string,SX> &outputs, map<string,SX> state,
          map<string,SX> action, map<string,SX> param, SX t) {
  // Model parameters
  double g = 9.8;
  double L1 = 0.1;
  double L2 = 0.1;
  double m0 = 0.1;
  double mp = 0.03;
  double m1 = mp;
  double m2 = mp;

  double d1 = m0 + m1 + m2;
  double d2 = (0.5*m1 + m2)*L1;
  double d3 = 0.5 * m2 * L2;
  double d4 = ( m1/3 + m2 )*SQR(L1);
  double d5 = 0.5 * m2 * L1 * L2;
  double d6 = m2 * SQR(L2)/3;

  double f1 = (0.5*m1 + m2) * L1 * g;
  double f2 = 0.5 * m2 * L2 * g;

  // State mapping (MATLAB-style reference): th0 = y(1), th1 = y(2), th2 = y(3),
  //                                         th0d = y(4), th1d = y(5), th2d = y(6)
  SX th0 = state["th0"];
  SX th1 = state["th1"];
  SX th2 = state["th2"];
  SX th0d = state["th0d"];
  SX th1d = state["th1d"];
  SX th2d = state["th2d"];

  // D = [ d1,          d2*cos(th1),     d3*cos(th2);
  //       d2*cos(th1), d4,              d5*cos(th1-th2);
  //       d3*cos(th2), d5*cos(th1-th2), d6 ];
  SX D( zerosSX(3,3) );
  makeDense(D);
  D(0,0) = d1;          D(0,1) = d2*cos(th1);     D(0,2) = d3*cos(th2);
  D(1,0) = d2*cos(th1); D(1,1) = d4;              D(1,2) = d5*cos(th1-th2);
  D(2,0) = d3*cos(th2); D(2,1) = d5*cos(th1-th2); D(2,2) = d6;

  // C = [ 0, -d2*sin(th1)*th1d,     -d3*sin(th2)*th2d;
  //       0, 0,                      d5*sin(th1-th2)*th2d;
  //       0, -d5*sin(th1-th2)*th1d,  0 ];
  SX C( zerosSX(3,3) );
  makeDense(C);
  C(0,0) = 0; C(0,1) = -d2*sin(th1)*th1d;     C(0,2) = -d3*sin(th2)*th2d;
  C(1,0) = 0; C(1,1) = 0;                     C(1,2) = d5*sin(th1-th2)*th2d;
  C(2,0) = 0; C(2,1) = -d5*sin(th1-th2)*th1d; C(2,2) = 0;

  // G = [0; -f1*sin(th1); -f2*sin(th2)]
  SX G( zerosSX(3,1) );
  makeDense(G);
  G.at(0) = 0;
  G.at(1) = -f1*sin(th1);
  G.at(2) = -f2*sin(th2);

  // H = [1; 0; 0]
  SX H( zerosSX(3,1) );
  makeDense(H);
  H.at(0) = 1;
  H.at(1) = 0;
  H.at(2) = 0;

  // dy(1:3) = y(4:6)
  xDot["th0"] = th0d;
  xDot["th1"] = th1d;
  xDot["th2"] = th2d;

  // dy(4:6) = D\( - C*y(4:6) - G + H*u )
  SX vel( zerosSX(3,1) );
  makeDense(vel);
  vel.at(0) = th0d;
  vel.at(1) = th1d;
  vel.at(2) = th2d;

  SX accel = mul( inv(D), - mul( C, vel ) - G + mul( H, SX(action["u"]) ) );
  simplify(accel.at(0));
  simplify(accel.at(1));
  simplify(accel.at(2));

  xDot["th0d"] = accel.at(0);
  xDot["th1d"] = accel.at(1);
  xDot["th2d"] = accel.at(2);

  // Cartesian positions of the cart and the two pendulum bobs (for plotting)
  outputs["cart_x"] = th0;
  outputs["cart_y"] = 0;
  outputs["bob0_x"] = th0 + L1*sin(th1);
  outputs["bob0_y"] = - L1*cos(th1);
  outputs["bob1_x"] = th0 + L1*sin(th1) + L2*sin(th2);
  outputs["bob1_y"] = - L1*cos(th1) - L2*cos(th2);
}
void Ddp::setupQFunctions() { /*************** inputs **************/ SX xk = ssym("xk", ode.nx()); SX uk = ssym("uk", ode.nu()); SX V_0_kp1( ssym( "V_0_kp1", 1, 1) ); SX V_x_kp1( ssym( "V_x_kp1", ode.nx(), 1) ); SX V_xx_kp1( ssym( "V_xx_kp1", ode.nx(), ode.nx()) ); vector<SX> qInputs(NUM_Q_INPUTS); qInputs.at(IDX_Q_INPUTS_X_K) = xk; qInputs.at(IDX_Q_INPUTS_U_K) = uk; qInputs.at(IDX_Q_INPUTS_V_0_KP1) = V_0_kp1; qInputs.at(IDX_Q_INPUTS_V_X_KP1) = V_x_kp1; qInputs.at(IDX_Q_INPUTS_V_XX_KP1) = V_xx_kp1; SX dx = ssym("dx", ode.nx()); SX du = ssym("du", ode.nu()); /**************** dynamics *********************/ // dummy params for now map<string,SX> dummyParams; double dt = (tf - t0)/(N - 1); SX f = ode.rk4Step( xk + dx, uk + du, uk + du, dummyParams, t0, dt); // timestep dependent: f(x,u,__t__) - same as cost SX f0 = ode.rk4Step( xk, uk, uk, dummyParams, t0, dt); // timestep dependent: f(x,u,__t__) - same as cost // SX f = ode.eulerStep( xk + dx, uk + du, dummyParams, SX(t0), SX(dt)); // timestep dependent: f(x,u,__t__) - same as cost // SX f0 = ode.eulerStep( xk, uk, dummyParams, SX(t0), SX(dt)); // timestep dependent: f(x,u,__t__) - same as cost /**************** Q function *********************/ SX xk_p_dx( xk + dx ); SX uk_p_du( uk + du ); map<string,SX> xkMap = ode.getStateMap( xk_p_dx ); map<string,SX> ukMap = ode.getActionMap( uk_p_du ); // map<string,SX> xkMap = ode.getStateMap( xk ); // map<string,SX> ukMap = ode.getActionMap( uk ); SX df = f - f0; SX dxZeros( zerosSX( ode.nx(), 1 ) ); SX duZeros( zerosSX( ode.nu(), 1 ) ); makeDense( dxZeros ); makeDense( duZeros ); for (int k=0; k<N; k++){ // function SX Q_0_k(costFcnExt( xkMap, ukMap, k, N ) + V_0_kp1 + mul( V_x_kp1.trans(), df ) + mul( df.trans(), mul( V_xx_kp1, df ) )/2 ); // jacobian SX Q_x_k = gradient( Q_0_k, dx ); SX Q_u_k = gradient( Q_0_k, du ); // hessian SX Q_xx_k = jacobian( Q_x_k, dx ); SX Q_xu_k = jacobian( Q_x_k, du ); // == jacobian( Q_u, x ).trans() SX Q_uu_k = jacobian( Q_u_k, du ); Q_0_k = substitute( Q_0_k, dx, dxZeros ); Q_x_k = substitute( Q_x_k, dx, dxZeros ); Q_u_k = substitute( Q_u_k, dx, dxZeros ); Q_xx_k = substitute( Q_xx_k, dx, dxZeros ); Q_xu_k = substitute( Q_xu_k, dx, dxZeros ); Q_uu_k = substitute( Q_uu_k, dx, dxZeros ); Q_0_k = substitute( Q_0_k, du, duZeros ); Q_x_k = substitute( Q_x_k, du, duZeros ); Q_u_k = substitute( Q_u_k, du, duZeros ); Q_xx_k = substitute( Q_xx_k, du, duZeros ); Q_xu_k = substitute( Q_xu_k, du, duZeros ); Q_uu_k = substitute( Q_uu_k, du, duZeros ); simplify(Q_0_k); simplify(Q_x_k); simplify(Q_u_k); simplify(Q_xx_k); simplify(Q_xu_k); simplify(Q_uu_k); // workaround bug where size() != size1()*size2() makeDense(Q_0_k); makeDense(Q_x_k); makeDense(Q_u_k); makeDense(Q_xx_k); makeDense(Q_xu_k); makeDense(Q_uu_k); // outputs vector<SX> qOutputs(NUM_Q_OUTPUTS); qOutputs.at(IDX_Q_OUTPUTS_Q_0_K) = Q_0_k; qOutputs.at(IDX_Q_OUTPUTS_Q_X_K) = Q_x_k; qOutputs.at(IDX_Q_OUTPUTS_Q_U_K) = Q_u_k; qOutputs.at(IDX_Q_OUTPUTS_Q_XX_K) = Q_xx_k; qOutputs.at(IDX_Q_OUTPUTS_Q_XU_K) = Q_xu_k; qOutputs.at(IDX_Q_OUTPUTS_Q_UU_K) = Q_uu_k; // sx function qFunctions.push_back( SXFunction( qInputs, qOutputs ) ); qFunctions.at(k).init(); } }
SX SX::__mul__(const SX& y) const{
  // Only simplifications that do not result in extra nodes are allowed
  if(!isConstant() && y.isConstant()) // put the constant first
    return y.__mul__(*this);
  else if(node->isZero() || y->isZero()) // one of the factors is zero
    return 0;
  else if(node->isOne()) // term1 is one
    return y;
  else if(y->isOne()) // term2 is one
    return *this;
  else if(y->isMinusOne())
    return -(*this);
  else if(node->isMinusOne())
    return -y;
  else if(y.hasDep() && y.getOp()==OP_INV)
    return (*this)/y.inv();
  else if(hasDep() && getOp()==OP_INV)
    return y/inv();
  else if(isConstant() && y.hasDep() && y.getOp()==OP_MUL && y.getDep(0).isConstant() &&
          getValue()*y.getDep(0).getValue()==1) // 5*(0.2*x) = x
    return y.getDep(1);
  else if(isConstant() && y.hasDep() && y.getOp()==OP_DIV && y.getDep(1).isConstant() &&
          getValue()==y.getDep(1).getValue()) // 5*(x/5) = x
    return y.getDep(0);
  else if(hasDep() && getOp()==OP_DIV && getDep(1).isEquivalent(y)) // (2/x)*x = 2
    return getDep(0);
  else if(y.hasDep() && y.getOp()==OP_DIV && y.getDep(1).isEquivalent(*this)) // x*(2/x) = 2
    return y.getDep(0);
  else // create a new branch
    return BinarySX::create(OP_MUL,*this,y);
}
SX SX::__sub__(const SX& y) const{
  // Only simplifications that do not result in extra nodes are allowed
  if(y->isZero()) // term2 is zero
    return *this;
  if(node->isZero()) // term1 is zero
    return -y;
  if(isEquivalent(y)) // the terms are equal
    return 0;
  else if(y.hasDep() && y.getOp()==OP_NEG) // x - (-y) -> x + y
    return __add__(-y);
  else if(hasDep() && getOp()==OP_ADD && getDep(1).isEquivalent(y)) // (a + y) - y -> a
    return getDep(0);
  else if(hasDep() && getOp()==OP_ADD && getDep(0).isEquivalent(y)) // (y + a) - y -> a
    return getDep(1);
  else if(y.hasDep() && y.getOp()==OP_ADD && isEquivalent(y.getDep(1))) // x - (a + x) -> -a
    return -y.getDep(0);
  else if(y.hasDep() && y.getOp()==OP_ADD && isEquivalent(y.getDep(0))) // x - (x + a) -> -a
    return -y.getDep(1);
  else // create a new branch
    return BinarySX::create(OP_SUB,*this,y);
}
SX SX::__add__(const SX& y) const{
  // NOTE: Only simplifications that do not result in extra nodes are allowed
  if(node->isZero()) // term1 is zero
    return y;
  else if(y->isZero()) // term2 is zero
    return *this;
  else if(y.hasDep() && y.getOp()==OP_NEG) // x + (-y) -> x - y
    return __sub__(-y);
  else if(hasDep() && getOp()==OP_NEG) // (-x) + y -> y - x
    return y.__sub__(getDep());
  else if(hasDep() && getOp()==OP_MUL && y.hasDep() && y.getOp()==OP_MUL &&
          getDep(0).isConstant() && getDep(0).getValue()==0.5 &&
          y.getDep(0).isConstant() && y.getDep(0).getValue()==0.5 &&
          y.getDep(1).isEquivalent(getDep(1))) // 0.5x + 0.5x = x
    return getDep(1);
  else if(hasDep() && getOp()==OP_DIV && y.hasDep() && y.getOp()==OP_DIV &&
          getDep(1).isConstant() && getDep(1).getValue()==2 &&
          y.getDep(1).isConstant() && y.getDep(1).getValue()==2 &&
          y.getDep(0).isEquivalent(getDep(0))) // x/2 + x/2 = x
    return getDep(0);
  else if(hasDep() && getOp()==OP_SUB && getDep(1).isEquivalent(y)) // (a - y) + y -> a
    return getDep(0);
  else if(y.hasDep() && y.getOp()==OP_SUB && isEquivalent(y.getDep(1))) // x + (a - x) -> a
    return y.getDep(0);
  else // create a new branch
    return BinarySX::create(OP_ADD,*this, y);
}
int main(){ // Declare variables SX u = SX::sym("u"); // control SX r = SX::sym("r"), s = SX::sym("s"); // states SX x = vertcat(r,s); // Number of differential states int nx = x.size1(); // Number of controls int nu = u.size1(); // Bounds and initial guess for the control vector<double> u_min = { -0.75 }; vector<double> u_max = { 1.0 }; vector<double> u_init = { 0.0 }; // Bounds and initial guess for the state vector<double> x0_min = { 0, 1 }; vector<double> x0_max = { 0, 1 }; vector<double> x_min = {-inf, -inf }; vector<double> x_max = { inf, inf }; vector<double> xf_min = { 0, 0 }; vector<double> xf_max = { 0, 0 }; vector<double> x_init = { 0, 0 }; // Final time double tf = 20.0; // Number of shooting nodes int ns = 50; // ODE right hand side and quadrature SX ode = vertcat((1 - s*s)*r - s + u, r); SX quad = r*r + s*s + u*u; SXDict dae = {{"x", x}, {"p", u}, {"ode", ode}, {"quad", quad}}; // Create an integrator (CVodes) Function F = integrator("integrator", "cvodes", dae, {{"t0", 0}, {"tf", tf/ns}}); // Total number of NLP variables int NV = nx*(ns+1) + nu*ns; // Declare variable vector for the NLP MX V = MX::sym("V",NV); // NLP variable bounds and initial guess vector<double> v_min,v_max,v_init; // Offset in V int offset=0; // State at each shooting node and control for each shooting interval vector<MX> X, U; for(int k=0; k<ns; ++k){ // Local state X.push_back( V.nz(Slice(offset,offset+nx))); if(k==0){ v_min.insert(v_min.end(), x0_min.begin(), x0_min.end()); v_max.insert(v_max.end(), x0_max.begin(), x0_max.end()); } else { v_min.insert(v_min.end(), x_min.begin(), x_min.end()); v_max.insert(v_max.end(), x_max.begin(), x_max.end()); } v_init.insert(v_init.end(), x_init.begin(), x_init.end()); offset += nx; // Local control U.push_back( V.nz(Slice(offset,offset+nu))); v_min.insert(v_min.end(), u_min.begin(), u_min.end()); v_max.insert(v_max.end(), u_max.begin(), u_max.end()); v_init.insert(v_init.end(), u_init.begin(), u_init.end()); offset += nu; } // State at end X.push_back(V.nz(Slice(offset,offset+nx))); v_min.insert(v_min.end(), xf_min.begin(), xf_min.end()); v_max.insert(v_max.end(), xf_max.begin(), xf_max.end()); v_init.insert(v_init.end(), x_init.begin(), x_init.end()); offset += nx; // Make sure that the size of the variable vector is consistent with the number of variables that we have referenced casadi_assert(offset==NV); // Objective function MX J = 0; //Constraint function and bounds vector<MX> g; // Loop over shooting nodes for(int k=0; k<ns; ++k){ // Create an evaluation node MXDict I_out = F(MXDict{{"x0", X[k]}, {"p", U[k]}}); // Save continuity constraints g.push_back( I_out.at("xf") - X[k+1] ); // Add objective function contribution J += I_out.at("qf"); } // NLP MXDict nlp = {{"x", V}, {"f", J}, {"g", vertcat(g)}}; // Create an NLP solver and buffers Function solver = nlpsol("nlpsol", "blocksqp", nlp); std::map<std::string, DM> arg, res; // Bounds and initial guess arg["lbx"] = v_min; arg["ubx"] = v_max; arg["lbg"] = 0; arg["ubg"] = 0; arg["x0"] = v_init; // Solve the problem res = solver(arg); // Optimal solution of the NLP vector<double> V_opt(res.at("x")); // Get the optimal state trajectory vector<double> r_opt(ns+1), s_opt(ns+1); for(int i=0; i<=ns; ++i){ r_opt[i] = V_opt.at(i*(nx+1)); s_opt[i] = V_opt.at(1+i*(nx+1)); } cout << "r_opt = " << endl << r_opt << endl; cout << "s_opt = " << endl << s_opt << endl; // Get the optimal control vector<double> u_opt(ns); for(int i=0; i<ns; ++i){ u_opt[i] = V_opt.at(nx + i*(nx+1)); } cout << "u_opt = " << endl << u_opt << 
endl; return 0; }
void expand(const SXMatrix& ex2, SXMatrix &ww, SXMatrix& tt){
  casadi_assert(ex2.scalar());
  SX ex = ex2.toScalar();

  // Terms, weights and indices of the nodes that are already expanded
  std::vector<std::vector<SXNode*> > terms;
  std::vector<std::vector<double> > weights;
  std::map<SXNode*,int> indices;

  // Stack of nodes that are not yet expanded
  std::stack<SXNode*> to_be_expanded;
  to_be_expanded.push(ex.get());

  while(!to_be_expanded.empty()){ // as long as there are nodes to be expanded

    // Check if the last element on the stack is already expanded
    if (indices.find(to_be_expanded.top()) != indices.end()){
      // Remove from stack
      to_be_expanded.pop();
      continue;
    }

    // Weights and terms
    std::vector<double> w;   // weights
    std::vector<SXNode*> f;  // terms

    if(to_be_expanded.top()->isConstant()){ // constant nodes are seen as multiples of one
      w.push_back(to_be_expanded.top()->getValue());
      f.push_back(casadi_limits<SX>::one.get());
    } else if(to_be_expanded.top()->isSymbolic()){ // symbolic nodes have weight one and itself as factor
      w.push_back(1);
      f.push_back(to_be_expanded.top());
    } else { // binary node
      casadi_assert(to_be_expanded.top()->hasDep()); // make sure that the node is binary

      // Check if addition, subtraction or multiplication
      SXNode* node = to_be_expanded.top();

      // If we have a binary node that we can factorize
      if(node->getOp() == OP_ADD || node->getOp() == OP_SUB ||
         (node->getOp() == OP_MUL && (node->dep(0)->isConstant() || node->dep(1)->isConstant()))){
        // Make sure that both children are factorized, if not - add to stack
        if (indices.find(node->dep(0).get()) == indices.end()){
          to_be_expanded.push(node->dep(0).get());
          continue;
        }
        if (indices.find(node->dep(1).get()) == indices.end()){
          to_be_expanded.push(node->dep(1).get());
          continue;
        }

        // Get indices of children
        int ind1 = indices[node->dep(0).get()];
        int ind2 = indices[node->dep(1).get()];

        // If multiplication
        if(node->getOp() == OP_MUL){
          double fac;
          if(node->dep(0)->isConstant()){ // Multiplication where the first factor is a constant
            fac = node->dep(0)->getValue();
            f = terms[ind2];
            w = weights[ind2];
          } else { // Multiplication where the second factor is a constant
            fac = node->dep(1)->getValue();
            f = terms[ind1];
            w = weights[ind1];
          }
          for(int i=0; i<w.size(); ++i) w[i] *= fac;

        } else { // if addition or subtraction
          if(node->getOp() == OP_ADD){ // Addition: join both sums
            f = terms[ind1];
            f.insert(f.end(), terms[ind2].begin(), terms[ind2].end());
            w = weights[ind1];
            w.insert(w.end(), weights[ind2].begin(), weights[ind2].end());
          } else { // Subtraction: join both sums with negative weights for second term
            f = terms[ind1];
            f.insert(f.end(), terms[ind2].begin(), terms[ind2].end());
            w = weights[ind1];
            w.reserve(f.size());
            for(int i=0; i<weights[ind2].size(); ++i) w.push_back(-weights[ind2][i]);
          }

          // Eliminate duplicate terms by merging their weights
          std::vector<double> w_new;   w_new.reserve(w.size()); // weights
          std::vector<SXNode*> f_new;  f_new.reserve(f.size()); // terms
          std::map<SXNode*,int> f_ind; // index in f_new
          for(int i=0; i<w.size(); i++){
            // Try to locate the node
            std::map<SXNode*,int>::iterator it = f_ind.find(f[i]);
            if(it == f_ind.end()){ // if the term wasn't found
              w_new.push_back(w[i]);
              f_new.push_back(f[i]);
              f_ind[f[i]] = f_new.size()-1;
            } else { // if the term already exists
              w_new[it->second] += w[i]; // just add the weight
            }
          }
          w = w_new;
          f = f_new;
        }
      } else { // if we have a binary node that we cannot factorize
        // By default, keep the node itself as a single term with unit weight
        w.push_back(1);
        f.push_back(node);
      }
    }

    // Save factorization of the node
    weights.push_back(w);
    terms.push_back(f);
    indices[to_be_expanded.top()] = terms.size()-1;

    // Remove node from stack
    to_be_expanded.pop();
  }

  // Save expansion to output
  int thisind = indices[ex.get()];
  ww = SXMatrix(weights[thisind]);

  vector<SX> termsv(terms[thisind].size());
  for(int i=0; i<termsv.size(); ++i)
    termsv[i] = SX::create(terms[thisind][i]);
  tt = SXMatrix(termsv);
}
void SymbolicQr::init() {
  // Call the base class initializer
  LinearSolverInternal::init();

  // Read options
  bool codegen = getOption("codegen");
  string compiler = getOption("compiler");

  // Make sure that a command processor is available
  if (codegen) {
#ifdef WITH_DL
    int flag = system(static_cast<const char*>(0));
    casadi_assert_message(flag!=0, "No command processor available");
#else // WITH_DL
    casadi_error("Codegen requires CasADi to be compiled with option \"WITH_DL\" enabled");
#endif // WITH_DL
  }

  // Symbolic expression for A
  SX A = SX::sym("A", input(0).sparsity());

  // Get the inverted column permutation
  std::vector<int> inv_colperm(colperm_.size());
  for (int k=0; k<colperm_.size(); ++k)
    inv_colperm[colperm_[k]] = k;

  // Get the inverted row permutation
  std::vector<int> inv_rowperm(rowperm_.size());
  for (int k=0; k<rowperm_.size(); ++k)
    inv_rowperm[rowperm_[k]] = k;

  // Permute the linear system
  SX Aperm = A(rowperm_, colperm_);

  // Generate the QR factorization function
  vector<SX> QR(2);
  qr(Aperm, QR[0], QR[1]);
  SXFunction fact_fcn(A, QR);

  // Optionally generate c code and load as DLL
  if (codegen) {
    stringstream ss;
    ss << "symbolic_qr_fact_fcn_" << this;
    fact_fcn_ = dynamicCompilation(fact_fcn, ss.str(),
                                   "Symbolic QR factorization function", compiler);
  } else {
    fact_fcn_ = fact_fcn;
  }

  // Initialize factorization function
  fact_fcn_.setOption("name", "QR_fact");
  fact_fcn_.init();

  // Symbolic expressions for solve function
  SX Q = SX::sym("Q", QR[0].sparsity());
  SX R = SX::sym("R", QR[1].sparsity());
  SX b = SX::sym("b", input(1).size1(), 1);

  // Solve non-transposed
  // We have Pb' * Q * R * Px * x = b <=> x = Px' * inv(R) * Q' * Pb * b

  // Permute the right hand sides
  SX bperm = b(rowperm_, ALL);

  // Solve the factorized system
  SX xperm = casadi::solve(R, mul(Q.T(), bperm));

  // Permute back the solution
  SX x = xperm(inv_colperm, ALL);

  // Generate the QR solve function
  vector<SX> solv_in(3);
  solv_in[0] = Q;
  solv_in[1] = R;
  solv_in[2] = b;
  SXFunction solv_fcn(solv_in, x);

  // Optionally generate c code and load as DLL
  if (codegen) {
    stringstream ss;
    ss << "symbolic_qr_solv_fcn_N_" << this;
    solv_fcn_N_ = dynamicCompilation(solv_fcn, ss.str(), "QR_solv_N", compiler);
  } else {
    solv_fcn_N_ = solv_fcn;
  }

  // Initialize solve function
  solv_fcn_N_.setOption("name", "QR_solv");
  solv_fcn_N_.init();

  // Solve transposed
  // We have (Pb' * Q * R * Px)' * x = b
  // <=> Px' * R' * Q' * Pb * x = b
  // <=> x = Pb' * Q * inv(R') * Px * b

  // Permute the right hand side
  bperm = b(colperm_, ALL);

  // Solve the factorized system
  xperm = mul(Q, casadi::solve(R.T(), bperm));

  // Permute back the solution
  x = xperm(inv_rowperm, ALL);

  // Modify the QR solve function
  solv_fcn = SXFunction(solv_in, x);

  // Optionally generate c code and load as DLL
  if (codegen) {
    stringstream ss;
    ss << "symbolic_qr_solv_fcn_T_" << this;
    solv_fcn_T_ = dynamicCompilation(solv_fcn, ss.str(), "QR_solv_T", compiler);
  } else {
    solv_fcn_T_ = solv_fcn;
  }

  // Initialize solve function
  solv_fcn_T_.setOption("name", "QR_solv_T");
  solv_fcn_T_.init();

  // Allocate storage for QR factorization
  Q_ = DMatrix::zeros(Q.sparsity());
  R_ = DMatrix::zeros(R.sparsity());
}
int main(){ // Declare variables SX u = SX::sym("u"); // control SX r = SX::sym("r"), s = SX::sym("s"); // states SX x = vertcat(r,s); // Number of differential states int nx = x.size1(); // Number of controls int nu = u.size1(); // Bounds and initial guess for the control vector<double> u_min = { -0.75 }; vector<double> u_max = { 1.0 }; vector<double> u_init = { 0.0 }; // Bounds and initial guess for the state vector<double> x0_min = { 0, 1 }; vector<double> x0_max = { 0, 1 }; vector<double> x_min = {-inf, -inf }; vector<double> x_max = { inf, inf }; vector<double> xf_min = { 0, 0 }; vector<double> xf_max = { 0, 0 }; vector<double> x_init = { 0, 0 }; // Final time double tf = 20.0; // Number of shooting nodes int ns = 50; // ODE right hand side and quadrature SX ode = vertcat((1 - s*s)*r - s + u, r); SX quad = r*r + s*s + u*u; SXFunction rhs("rhs", daeIn("x", x, "p", u), daeOut("ode", ode, "quad", quad)); // Create an integrator (CVodes) Integrator integrator("integrator", "cvodes", rhs, make_dict("t0", 0, "tf", tf/ns)); // Total number of NLP variables int NV = nx*(ns+1) + nu*ns; // Declare variable vector for the NLP MX V = MX::sym("V",NV); // NLP variable bounds and initial guess vector<double> v_min,v_max,v_init; // Offset in V int offset=0; // State at each shooting node and control for each shooting interval vector<MX> X, U; for(int k=0; k<ns; ++k){ // Local state X.push_back( V[Slice(offset,offset+nx)] ); if(k==0){ v_min.insert(v_min.end(), x0_min.begin(), x0_min.end()); v_max.insert(v_max.end(), x0_max.begin(), x0_max.end()); } else { v_min.insert(v_min.end(), x_min.begin(), x_min.end()); v_max.insert(v_max.end(), x_max.begin(), x_max.end()); } v_init.insert(v_init.end(), x_init.begin(), x_init.end()); offset += nx; // Local control U.push_back( V[Slice(offset,offset+nu)] ); v_min.insert(v_min.end(), u_min.begin(), u_min.end()); v_max.insert(v_max.end(), u_max.begin(), u_max.end()); v_init.insert(v_init.end(), u_init.begin(), u_init.end()); offset += nu; } // State at end X.push_back(V[Slice(offset,offset+nx)]); v_min.insert(v_min.end(), xf_min.begin(), xf_min.end()); v_max.insert(v_max.end(), xf_max.begin(), xf_max.end()); v_init.insert(v_init.end(), x_init.begin(), x_init.end()); offset += nx; // Make sure that the size of the variable vector is consistent with the number of variables that we have referenced casadi_assert(offset==NV); // Objective function MX J = 0; //Constraint function and bounds vector<MX> g; // Loop over shooting nodes for(int k=0; k<ns; ++k){ // Create an evaluation node map<string, MX> I_out = integrator(make_map("x0", X[k], "p", U[k])); // Save continuity constraints g.push_back( I_out.at("xf") - X[k+1] ); // Add objective function contribution J += I_out.at("qf"); } // NLP MXFunction nlp("nlp", nlpIn("x", V), nlpOut("f", J, "g", vertcat(g))); // Set options Dict opts; opts["tol"] = 1e-5; opts["max_iter"] = 100; opts["linear_solver"] = "ma27"; // Create an NLP solver and buffers NlpSolver nlp_solver("nlp_solver", "ipopt", nlp, opts); std::map<std::string, DMatrix> arg, res; // Bounds and initial guess arg["lbx"] = v_min; arg["ubx"] = v_max; arg["lbg"] = 0; arg["ubg"] = 0; arg["x0"] = v_init; // Solve the problem res = nlp_solver(arg); // Optimal solution of the NLP const Matrix<double>& V_opt = res.at("x"); // Get the optimal state trajectory vector<double> r_opt(ns+1), s_opt(ns+1); for(int i=0; i<=ns; ++i){ r_opt[i] = V_opt.at(i*(nx+1)); s_opt[i] = V_opt.at(1+i*(nx+1)); } cout << "r_opt = " << endl << r_opt << endl; cout << "s_opt = " << endl << s_opt << 
endl; // Get the optimal control vector<double> u_opt(ns); for(int i=0; i<ns; ++i){ u_opt[i] = V_opt.at(nx + i*(nx+1)); } cout << "u_opt = " << endl << u_opt << endl; return 0; }
SX SX::__div__(const SX& y) const{
  // Only simplifications that do not result in extra nodes are allowed
  if(y->isZero()) // term2 is zero
    return casadi_limits<SX>::nan;
  else if(node->isZero()) // term1 is zero
    return 0;
  else if(y->isOne()) // term2 is one
    return *this;
  else if(isEquivalent(y)) // terms are equal
    return 1;
  else if(isDoubled() && y.isEqual(2)) // (x + x)/2 = x
    return node->dep(0);
  else if(isOp(OP_MUL) && y.isEquivalent(node->dep(0))) // (y*a)/y = a
    return node->dep(1);
  else if(isOp(OP_MUL) && y.isEquivalent(node->dep(1))) // (a*y)/y = a
    return node->dep(0);
  else if(node->isOne()) // 1/y
    return y.inv();
  else if(y.hasDep() && y.getOp()==OP_INV) // x/(1/y) = x*y
    return (*this)*y.inv();
  else if(isDoubled() && y.isDoubled()) // (x + x)/(y + y) = x/y
    return node->dep(0) / y->dep(0);
  else if(y.isConstant() && hasDep() && getOp()==OP_DIV && getDep(1).isConstant() &&
          y.getValue()*getDep(1).getValue()==1) // (x/5)/0.2 = x
    return getDep(0);
  else if(y.hasDep() && y.getOp()==OP_MUL && y.getDep(1).isEquivalent(*this)) // x/(2*x) = 1/2
    return BinarySX::create(OP_DIV,1,y.getDep(0));
  else if(hasDep() && getOp()==OP_NEG && getDep(0).isEquivalent(y)) // (-x)/x = -1
    return -1;
  else if(y.hasDep() && y.getOp()==OP_NEG && y.getDep(0).isEquivalent(*this)) // x/(-x) = -1
    return -1;
  else if(y.hasDep() && y.getOp()==OP_NEG && hasDep() && getOp()==OP_NEG &&
          getDep(0).isEquivalent(y.getDep(0))) // (-x)/(-x) = 1
    return 1;
  else if(isOp(OP_DIV) && y.isEquivalent(node->dep(0))) // (y/a)/y = 1/a
    return node->dep(1).inv();
  else // create a new branch
    return BinarySX::create(OP_DIV,*this,y);
}
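A short usage sketch (not from the original source) of the node-free simplifications implemented in __add__, __sub__, __mul__ and __div__ above; each rewrite reuses existing nodes instead of building a new BinarySX branch:

SX x("x");
SX a = x + 0;        // term2 is zero: returns x unchanged
SX b = (2/x) * x;    // (2/x)*x rule in __mul__: returns the constant 2
SX c = x / x;        // terms are equivalent: returns 1
SX d = (x + 3) - 3;  // (a + y) - y rule in __sub__: returns x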
void LiftedSQPInternal::init(){ // Call the init method of the base class NlpSolverInternal::init(); // Number of lifted variables nv = getOption("num_lifted"); if(verbose_){ cout << "Initializing SQP method with " << nx_ << " variables and " << ng_ << " constraints." << endl; cout << "Lifting " << nv << " variables." << endl; if(gauss_newton_){ cout << "Gauss-Newton objective with " << F_.input().numel() << " terms." << endl; } } // Read options max_iter_ = getOption("max_iter"); max_iter_ls_ = getOption("max_iter_ls"); toldx_ = getOption("toldx"); tolgl_ = getOption("tolgl"); sigma_ = getOption("sigma"); rho_ = getOption("rho"); mu_safety_ = getOption("mu_safety"); eta_ = getOption("eta"); tau_ = getOption("tau"); // Assume SXFunction for now SXFunction ffcn = shared_cast<SXFunction>(F_); casadi_assert(!ffcn.isNull()); SXFunction gfcn = shared_cast<SXFunction>(G_); casadi_assert(!gfcn.isNull()); // Extract the free variables and split into independent and dependent variables SX x = ffcn.inputExpr(0); int nx = x.size(); nu = nx-nv; SX u = x[Slice(0,nu)]; SX v = x[Slice(nu,nu+nv)]; // Extract the constraint equations and split into constraints and definitions of dependent variables SX f1 = ffcn.outputExpr(0); int nf1 = f1.numel(); SX g = gfcn.outputExpr(0); int nf2 = g.numel()-nv; SX v_eq = g(Slice(0,nv)); SX f2 = g(Slice(nv,nv+nf2)); // Definition of v SX v_def = v_eq + v; // Objective function SX f; // Multipliers SX lam_x, lam_g, lam_f2; if(gauss_newton_){ // Least square objective f = inner_prod(f1,f1)/2; } else { // Scalar objective function f = f1; // Lagrange multipliers for the simple bounds on u SX lam_u = ssym("lam_u",nu); // Lagrange multipliers for the simple bounds on v SX lam_v = ssym("lam_v",nv); // Lagrange multipliers for the simple bounds on x lam_x = vertcat(lam_u,lam_v); // Lagrange multipliers corresponding to the definition of the dependent variables SX lam_v_eq = ssym("lam_v_eq",nv); // Lagrange multipliers for the nonlinear constraints that aren't eliminated lam_f2 = ssym("lam_f2",nf2); if(verbose_){ cout << "Allocated intermediate variables." << endl; } // Lagrange multipliers for constraints lam_g = vertcat(lam_v_eq,lam_f2); // Lagrangian function SX lag = f + inner_prod(lam_x,x); if(!f2.empty()) lag += inner_prod(lam_f2,f2); if(!v.empty()) lag += inner_prod(lam_v_eq,v_def); // Gradient of the Lagrangian SX lgrad = casadi::gradient(lag,x); if(!v.empty()) lgrad -= vertcat(SX::zeros(nu),lam_v_eq); // Put here to ensure that lgrad is of the form "h_extended -v_extended" makeDense(lgrad); if(verbose_){ cout << "Generated the gradient of the Lagrangian." 
<< endl; } // Condensed gradient of the Lagrangian f1 = lgrad[Slice(0,nu)]; nf1 = nu; // Gradient of h SX v_eq_grad = lgrad[Slice(nu,nu+nv)]; // Reverse lam_v_eq and v_eq_grad SX v_eq_grad_reversed = v_eq_grad; copy(v_eq_grad.rbegin(),v_eq_grad.rend(),v_eq_grad_reversed.begin()); SX lam_v_eq_reversed = lam_v_eq; copy(lam_v_eq.rbegin(),lam_v_eq.rend(),lam_v_eq_reversed.begin()); // Augment h and lam_v_eq v_eq.append(v_eq_grad_reversed); v.append(lam_v_eq_reversed); } // Residual function G SXVector G_in(G_NUM_IN); G_in[G_X] = x; G_in[G_LAM_X] = lam_x; G_in[G_LAM_G] = lam_g; SXVector G_out(G_NUM_OUT); G_out[G_D] = v_eq; G_out[G_G] = g; G_out[G_F] = f; rfcn_ = SXFunction(G_in,G_out); rfcn_.setOption("number_of_fwd_dir",0); rfcn_.setOption("number_of_adj_dir",0); rfcn_.setOption("live_variables",true); rfcn_.init(); if(verbose_){ cout << "Generated residual function ( " << shared_cast<SXFunction>(rfcn_).getAlgorithmSize() << " nodes)." << endl; } // Difference vector d SX d = ssym("d",nv); if(!gauss_newton_){ vector<SX> dg = ssym("dg",nv).data(); reverse(dg.begin(),dg.end()); d.append(dg); } // Substitute out the v from the h SX d_def = (v_eq + v)-d; SXVector ex(3); ex[0] = f1; ex[1] = f2; ex[2] = f; substituteInPlace(v, d_def, ex, false); SX f1_z = ex[0]; SX f2_z = ex[1]; SX f_z = ex[2]; // Modified function Z enum ZIn{Z_U,Z_D,Z_LAM_X,Z_LAM_F2,Z_NUM_IN}; SXVector zfcn_in(Z_NUM_IN); zfcn_in[Z_U] = u; zfcn_in[Z_D] = d; zfcn_in[Z_LAM_X] = lam_x; zfcn_in[Z_LAM_F2] = lam_f2; enum ZOut{Z_D_DEF,Z_F12,Z_NUM_OUT}; SXVector zfcn_out(Z_NUM_OUT); zfcn_out[Z_D_DEF] = d_def; zfcn_out[Z_F12] = vertcat(f1_z,f2_z); SXFunction zfcn(zfcn_in,zfcn_out); zfcn.init(); if(verbose_){ cout << "Generated reconstruction function ( " << zfcn.getAlgorithmSize() << " nodes)." << endl; } // Matrix A and B in lifted Newton SX B = zfcn.jac(Z_U,Z_F12); SX B1 = B(Slice(0,nf1),Slice(0,B.size2())); SX B2 = B(Slice(nf1,B.size1()),Slice(0,B.size2())); if(verbose_){ cout << "Formed B1 (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros) " << "and B2 (dimension " << B2.size1() << "-by-" << B2.size2() << ", "<< B2.size() << " nonzeros)." << endl; } // Step in u SX du = ssym("du",nu); SX dlam_f2 = ssym("dlam_f2",lam_f2.sparsity()); SX b1 = f1_z; SX b2 = f2_z; SX e; if(nv > 0){ // Directional derivative of Z vector<vector<SX> > Z_fwdSeed(2,zfcn_in); vector<vector<SX> > Z_fwdSens(2,zfcn_out); vector<vector<SX> > Z_adjSeed; vector<vector<SX> > Z_adjSens; Z_fwdSeed[0][Z_U].setZero(); Z_fwdSeed[0][Z_D] = -d; Z_fwdSeed[0][Z_LAM_X].setZero(); Z_fwdSeed[0][Z_LAM_F2].setZero(); Z_fwdSeed[1][Z_U] = du; Z_fwdSeed[1][Z_D] = -d; Z_fwdSeed[1][Z_LAM_X].setZero(); Z_fwdSeed[1][Z_LAM_F2] = dlam_f2; zfcn.eval(zfcn_in,zfcn_out,Z_fwdSeed,Z_fwdSens,Z_adjSeed,Z_adjSens); b1 += Z_fwdSens[0][Z_F12](Slice(0,nf1)); b2 += Z_fwdSens[0][Z_F12](Slice(nf1,B.size1())); e = Z_fwdSens[1][Z_D_DEF]; } if(verbose_){ cout << "Formed b1 (dimension " << b1.size1() << "-by-" << b1.size2() << ", "<< b1.size() << " nonzeros) " << "and b2 (dimension " << b2.size1() << "-by-" << b2.size2() << ", "<< b2.size() << " nonzeros)." << endl; } // Generate Gauss-Newton Hessian if(gauss_newton_){ b1 = mul(trans(B1),b1); B1 = mul(trans(B1),B1); if(verbose_){ cout << "Gauss Newton Hessian (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros)." 
<< endl; } } // Make sure b1 and b2 are dense vectors makeDense(b1); makeDense(b2); // Quadratic approximation SXVector lfcn_in(LIN_NUM_IN); lfcn_in[LIN_X] = x; lfcn_in[LIN_D] = d; lfcn_in[LIN_LAM_X] = lam_x; lfcn_in[LIN_LAM_G] = lam_g; SXVector lfcn_out(LIN_NUM_OUT); lfcn_out[LIN_F1] = b1; lfcn_out[LIN_J1] = B1; lfcn_out[LIN_F2] = b2; lfcn_out[LIN_J2] = B2; lfcn_ = SXFunction(lfcn_in,lfcn_out); // lfcn_.setOption("verbose",true); lfcn_.setOption("number_of_fwd_dir",0); lfcn_.setOption("number_of_adj_dir",0); lfcn_.setOption("live_variables",true); lfcn_.init(); if(verbose_){ cout << "Generated linearization function ( " << shared_cast<SXFunction>(lfcn_).getAlgorithmSize() << " nodes)." << endl; } // Step expansion SXVector efcn_in(EXP_NUM_IN); copy(lfcn_in.begin(),lfcn_in.end(),efcn_in.begin()); efcn_in[EXP_DU] = du; efcn_in[EXP_DLAM_F2] = dlam_f2; efcn_ = SXFunction(efcn_in,e); efcn_.setOption("number_of_fwd_dir",0); efcn_.setOption("number_of_adj_dir",0); efcn_.setOption("live_variables",true); efcn_.init(); if(verbose_){ cout << "Generated step expansion function ( " << shared_cast<SXFunction>(efcn_).getAlgorithmSize() << " nodes)." << endl; } // Current guess for the primal solution DMatrix &x_k = output(NLP_SOLVER_X); // Current guess for the dual solution DMatrix &lam_x_k = output(NLP_SOLVER_LAM_X); DMatrix &lam_g_k = output(NLP_SOLVER_LAM_G); // Allocate a QP solver QpSolverCreator qp_solver_creator = getOption("qp_solver"); qp_solver_ = qp_solver_creator(B1.sparsity(),B2.sparsity()); // Set options if provided if(hasSetOption("qp_solver_options")){ Dictionary qp_solver_options = getOption("qp_solver_options"); qp_solver_.setOption(qp_solver_options); } // Initialize the QP solver qp_solver_.init(); if(verbose_){ cout << "Allocated QP solver." << endl; } // Residual d_k_ = DMatrix(d.sparsity(),0); // Primal step dx_k_ = DMatrix(x_k.sparsity()); // Dual step dlam_x_k_ = DMatrix(lam_x_k.sparsity()); dlam_g_k_ = DMatrix(lam_g_k.sparsity()); }