// Constructor: records the problem structure (sparsities of A, G, F),
// derives the problem dimensions and allocates/initializes all solver inputs.
SdpSolverInternal::SdpSolverInternal(const std::vector<Sparsity> &st) : st_(st) {
  // Register solver options
  addOption("calc_p", OT_BOOLEAN, true,
            "Indicate if the P-part of primal solution should be allocated and calculated. "
            "You may want to avoid calculating this variable for problems with n large, "
            "as is always dense (m x m).");
  addOption("calc_dual", OT_BOOLEAN, true,
            "Indicate if dual should be allocated and calculated. "
            "You may want to avoid calculating this variable for problems with n large, "
            "as is always dense (m x m).");
  addOption("print_problem", OT_BOOLEAN, false, "Print out problem statement for debugging.");

  // The structure vector must carry exactly the expected entries
  casadi_assert_message(st_.size()==SDP_STRUCT_NUM, "Problem structure mismatch");

  const Sparsity& A = st_[SDP_STRUCT_A];
  const Sparsity& G = st_[SDP_STRUCT_G];
  const Sparsity& F = st_[SDP_STRUCT_F];

  // G is the constant part of the LMI and must have a symmetric pattern
  casadi_assert_message(G==G.transpose(),
                        "SdpSolverInternal: Supplied G sparsity must "
                        "symmetric but got " << G.dimString());

  // Dimensions: m_ is the semidefinite block size, nc_/n_ come from A
  m_ = G.size1();
  nc_ = A.size1();
  n_ = A.size2();

  // F horizontally stacks n_ coefficient blocks Fi, each m_ x m_
  casadi_assert_message(F.size1()==m_,
                        "SdpSolverInternal: Supplied F sparsity: number of rows ("
                        << F.size1() << ") must match m (" << m_ << ")");
  casadi_assert_message(F.size2()%n_==0,
                        "SdpSolverInternal: Supplied F sparsity: "
                        "number of columns (" << F.size2()
                        << ") must be an integer multiple of n ("
                        << n_ << "), but got remainder " << F.size2()%n_);

  // Input arguments (structural inputs start out as all-zero / +-inf bounds)
  setNumInputs(SDP_SOLVER_NUM_IN);
  input(SDP_SOLVER_G) = DMatrix(G, 0);
  input(SDP_SOLVER_F) = DMatrix(F, 0);
  input(SDP_SOLVER_A) = DMatrix(A, 0);
  input(SDP_SOLVER_C) = DMatrix::zeros(n_);
  input(SDP_SOLVER_LBX) = -DMatrix::inf(n_);
  input(SDP_SOLVER_UBX) = DMatrix::inf(n_);
  input(SDP_SOLVER_LBA) = -DMatrix::inf(nc_);
  input(SDP_SOLVER_UBA) = DMatrix::inf(nc_);

  // Each coefficient block Fi must itself have a symmetric pattern
  for (int i=0;i<n_;i++) {
    Sparsity s = input(SDP_SOLVER_F)(ALL, Slice(i*m_, (i+1)*m_)).sparsity();
    casadi_assert_message(s==s.transpose(),
                          "SdpSolverInternal: Each supplied Fi must be symmetric. "
                          "But got " << s.dimString() << " for i = " << i << ".");
  }

  // Attach the IO schemes
  input_.scheme = SCHEME_SDPInput;
  output_.scheme = SCHEME_SDPOutput;
}
// Constructor LpSolverInternal::LpSolverInternal(const std::vector<Sparsity> &st) : st_(st) { casadi_assert_message(st_.size()==LP_STRUCT_NUM, "Problem structure mismatch"); const Sparsity& A = st_[LP_STRUCT_A]; n_ = A.size2(); nc_ = A.size1(); // Input arguments setNumInputs(LP_SOLVER_NUM_IN); input(LP_SOLVER_A) = DMatrix(A); input(LP_SOLVER_C) = DMatrix::zeros(n_); input(LP_SOLVER_LBA) = -DMatrix::inf(nc_); input(LP_SOLVER_UBA) = DMatrix::inf(nc_); input(LP_SOLVER_LBX) = -DMatrix::inf(n_); input(LP_SOLVER_UBX) = DMatrix::inf(n_); // Output arguments setNumOutputs(LP_SOLVER_NUM_OUT); output(LP_SOLVER_X) = DMatrix::zeros(n_); output(LP_SOLVER_COST) = 0.0; output(LP_SOLVER_LAM_X) = DMatrix::zeros(n_); output(LP_SOLVER_LAM_A) = DMatrix::zeros(nc_); input_.scheme = SCHEME_LpSolverInput; output_.scheme = SCHEME_LpSolverOutput; }
// Initializes the grid from a vector: the vector is promoted to a
// single-column matrix and initialization is delegated to the base class.
returnValue VariablesGrid::init( const DVector& arg, const Grid& _grid, VariableType _type )
{
    const DMatrix argAsMatrix( arg );
    return MatrixVariablesGrid::init( argAsMatrix, _grid, _type );
}
// Returns the Cholesky factor computed by CSparse as a DMatrix.
DMatrix CSparseCholeskyInternal::getFactorization() const {
  casadi_assert(L_);
  cs *L = L_->L;

  // Raw compressed storage of the factor
  const int nz = L->nzmax;
  const int nrow = L->m;  // number of rows
  const int ncol = L->n;  // number of columns

  // Copy the index and value arrays into STL vectors
  // NOTE(review): CSparse stores matrices column-compressed; reading L->p as a
  // row-pointer array of length nrow+1 relies on the factor being square
  // (nrow == ncol) -- TODO confirm for non-square inputs.
  std::vector<int> rowind(L->p, L->p + nrow + 1);
  std::vector<int> col(L->i, L->i + nz);
  std::vector<double> data(L->x, L->x + nz);

  // Assemble and transpose to obtain the factor in the expected orientation
  DMatrix factor(CRSSparsity(nrow, ncol, col, rowind), data);
  return trans(factor);
}
// Initializes the QCQP-to-SOCP reformulation: factorizes the objective and
// constraint Hessians, builds the SOCP cone structure from the Cholesky
// factors, and creates and configures the underlying SocpSolver.
void QcqpToSocp::init() {
  // Initialize the base classes
  QcqpSolverInternal::init();

  // Collection of sparsities that will make up SOCP_SOLVER_G
  std::vector<Sparsity> socp_g;

  // Allocate Cholesky solvers: one for the objective Hessian H, and one for
  // each of the nq_ quadratic-constraint Hessians (the n_ x n_ row blocks of P)
  cholesky_.push_back(LinearSolver("csparsecholesky", st_[QCQP_STRUCT_H]));
  for (int i=0;i<nq_;++i) {
    cholesky_.push_back(
      LinearSolver("csparsecholesky",
                   DMatrix(st_[QCQP_STRUCT_P])(range(i*n_, (i+1)*n_), ALL).sparsity()));
  }

  for (int i=0;i<nq_+1;++i) {
    // Initialize Cholesky solve
    cholesky_[i].init();

    // Harvest Cholesky sparsity patterns
    // Note that we add extra scalar to make room for the epigraph-reformulation variable
    socp_g.push_back(blkdiag(
      cholesky_[i].getFactorizationSparsity(false),
      Sparsity::dense(1, 1)));
  }

  // Create an SocpSolver instance; the extra column in "a" holds the
  // epigraph variable
  solver_ = SocpSolver(getOption(solvername()),
                       socpStruct("g", horzcat(socp_g),
                                  "a", horzcat(input(QCQP_SOLVER_A).sparsity(),
                                               Sparsity::sparse(nc_, 1))));
  //solver_.setQCQPOptions();
  if (hasSetOption(optionsname())) solver_.setOption(getOption(optionsname()));

  // Every cone has dimension n_+1 (n_ variables plus the epigraph scalar)
  std::vector<int> ni(nq_+1);
  for (int i=0;i<nq_+1;++i) {
    ni[i] = n_+1;
  }
  solver_.setOption("ni", ni);

  // Initialize the SocpSolver
  solver_.init();
}
void FixedStepIntegrator::init() { // Call the base class init IntegratorInternal::init(); // Number of finite elements and time steps nk_ = getOption("number_of_finite_elements"); casadi_assert(nk_>0); h_ = (tf_ - t0_)/nk_; // Setup discrete time dynamics setupFG(); // Get discrete time dimensions Z_ = F_.input(DAE_Z); nZ_ = Z_.nnz(); RZ_ = G_.isNull() ? DMatrix() : G_.input(RDAE_RZ); nRZ_ = RZ_.nnz(); // Allocate tape if backward states are present if (nrx_>0) { x_tape_.resize(nk_+1, vector<double>(nx_)); Z_tape_.resize(nk_, vector<double>(nZ_)); } }
void SDPSDQPInternal::init() { // Initialize the base classes SdqpSolverInternal::init(); cholesky_ = LinearSolver("csparsecholesky", st_[SDQP_STRUCT_H]); cholesky_.init(); MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true)); MX h_socp = MX::sym("h", n_); MX f_socp = sqrt(inner_prod(h_socp, h_socp)); MX en_socp = 0.5/f_socp; MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity()); MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity()); std::vector<MX> fi(n_+1); MX znp = MX::sparse(n_+1, n_+1); for (int k=0;k<n_;++k) { MX gk = vertcat(g_socp(ALL, k), DMatrix::sparse(1, 1)); MX fk = -blockcat(znp, gk, gk.T(), DMatrix::sparse(1, 1)); // TODO(Joel): replace with ALL fi.push_back(blkdiag(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk)); } MX fin = en_socp*DMatrix::eye(n_+2); fin(n_, n_+1) = en_socp; fin(n_+1, n_) = en_socp; fi.push_back(blkdiag(DMatrix::sparse(f_sdqp.size1(), f_sdqp.size1()), -fin)); MX h0 = vertcat(h_socp, DMatrix::sparse(1, 1)); MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp); g = blkdiag(g_sdqp, g); IOScheme mappingIn("g_socp", "h_socp", "f_sdqp", "g_sdqp"); IOScheme mappingOut("f", "g"); mapping_ = MXFunction(mappingIn("g_socp", g_socp, "h_socp", h_socp, "f_sdqp", f_sdqp, "g_sdqp", g_sdqp), mappingOut("f", horzcat(fi), "g", g)); mapping_.init(); // Create an sdpsolver instance std::string sdpsolver_name = getOption("sdp_solver"); sdpsolver_ = SdpSolver(sdpsolver_name, sdpStruct("g", mapping_.output("g").sparsity(), "f", mapping_.output("f").sparsity(), "a", horzcat(input(SDQP_SOLVER_A).sparsity(), Sparsity::sparse(nc_, 1)))); if (hasSetOption("sdp_solver_options")) { sdpsolver_.setOption(getOption("sdp_solver_options")); } // Initialize the SDP solver sdpsolver_.init(); sdpsolver_.input(SDP_SOLVER_C).at(n_)=1; // Output arguments setNumOutputs(SDQP_SOLVER_NUM_OUT); output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1); std::vector<int> r = range(input(SDQP_SOLVER_G).size1()); output(SDQP_SOLVER_P) = 
sdpsolver_.output(SDP_SOLVER_P).isEmpty() ? DMatrix() : sdpsolver_.output(SDP_SOLVER_P)(r, r); output(SDQP_SOLVER_DUAL) = sdpsolver_.output(SDP_SOLVER_DUAL).isEmpty() ? DMatrix() : sdpsolver_.output(SDP_SOLVER_DUAL)(r, r); output(SDQP_SOLVER_COST) = 0.0; output(SDQP_SOLVER_DUAL_COST) = 0.0; output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1); output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1); }
void NLPSolverInternal::init(){ // Read options verbose_ = getOption("verbose"); gauss_newton_ = getOption("gauss_newton"); // Initialize the functions casadi_assert_message(!F_.isNull(),"No objective function"); if(!F_.isInit()){ F_.init(); log("Objective function initialized"); } if(!G_.isNull() && !G_.isInit()){ G_.init(); log("Constraint function initialized"); } // Get dimensions n_ = F_.input(0).numel(); m_ = G_.isNull() ? 0 : G_.output(0).numel(); parametric_ = getOption("parametric"); if (parametric_) { casadi_assert_message(F_.getNumInputs()==2, "Wrong number of input arguments to F for parametric NLP. Must be 2, but got " << F_.getNumInputs()); } else { casadi_assert_message(F_.getNumInputs()==1, "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got " << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option."); } // Basic sanity checks casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2, "Wrong number of input arguments to F. Must be 1 or 2"); if (F_.getNumInputs()==2) parametric_=true; casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1, "To avoid confusion, the input argument to F must be vector. You supplied " << F_.input().dimString() << endl << " We suggest you make the following changes:" << endl << " - F is an SXFunction: SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl << " or F - -> F = vec(F) " << " - F is an MXFunction: MXFunction([X],[rhs]) -> " << endl << " X_vec = MX(\"X\",vec(X.sparsity())) " << endl << " F_vec = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]]) " << endl << " or F - -> F = vec(F) " << " You may ignore this warning by setting the 'ignore_check_vec' option to true." 
<< endl ); casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F"); casadi_assert_message(gauss_newton_ || F_.output().scalar(), "Output argument of F not scalar."); casadi_assert_message(F_.output().dense(), "Output argument of F not dense."); casadi_assert_message(F_.input().dense(), "Input argument of F must be dense. You supplied " << F_.input().dimString()); if(!G_.isNull()) { if (parametric_) { casadi_assert_message(G_.getNumInputs()==2, "Wrong number of input arguments to G for parametric NLP. Must be 2, but got " << G_.getNumInputs()); } else { casadi_assert_message(G_.getNumInputs()==1, "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option."); } casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G"); casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions"); casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(), "F and G input dimension must match. F " << F_.input().dimString() << ". 
G " << G_.input().dimString()); } // Find out if we are to expand the objective function in terms of scalar operations bool expand_f = getOption("expand_f"); if(expand_f){ log("Expanding objective function"); // Cast to MXFunction MXFunction F_mx = shared_cast<MXFunction>(F_); if(F_mx.isNull()){ casadi_warning("Cannot expand objective function as it is not an MXFunction"); } else { // Take use the input scheme of G if possible (it might be an SXFunction) vector<SXMatrix> inputv; if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){ inputv = G_.symbolicInputSX(); } else { inputv = F_.symbolicInputSX(); } // Try to expand the MXFunction F_ = F_mx.expand(inputv); F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir")); F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir")); F_.init(); } } // Find out if we are to expand the constraint function in terms of scalar operations bool expand_g = getOption("expand_g"); if(expand_g){ log("Expanding constraint function"); // Cast to MXFunction MXFunction G_mx = shared_cast<MXFunction>(G_); if(G_mx.isNull()){ casadi_warning("Cannot expand constraint function as it is not an MXFunction"); } else { // Take use the input scheme of F if possible (it might be an SXFunction) vector<SXMatrix> inputv; if(F_.getNumInputs()==G_.getNumInputs()){ inputv = F_.symbolicInputSX(); } else { inputv = G_.symbolicInputSX(); } // Try to expand the MXFunction G_ = G_mx.expand(inputv); G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir")); G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir")); G_.init(); } } // Find out if we are to expand the constraint function in terms of scalar operations bool generate_hessian = getOption("generate_hessian"); if(generate_hessian && H_.isNull()){ casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported"); log("generating hessian"); // Simple if unconstrained if(G_.isNull()){ // Create Hessian of the 
objective function FX HF = F_.hessian(); HF.init(); // Symbolic inputs of HF vector<MX> HF_in = F_.symbolicInput(); // Lagrange multipliers MX lam("lam",0); // Objective function scaling MX sigma("sigma"); // Inputs of the Hessian function vector<MX> H_in = HF_in; H_in.insert(H_in.begin()+1, lam); H_in.insert(H_in.begin()+2, sigma); // Get an expression for the Hessian of F MX hf = HF.call(HF_in).at(0); // Create the scaled Hessian function H_ = MXFunction(H_in, sigma*hf); log("Unconstrained Hessian function generated"); } else { // G_.isNull() // Check if the functions are SXFunctions SXFunction F_sx = shared_cast<SXFunction>(F_); SXFunction G_sx = shared_cast<SXFunction>(G_); // Efficient if both functions are SXFunction if(!F_sx.isNull() && !G_sx.isNull()){ // Expression for f and g SXMatrix f = F_sx.outputSX(); SXMatrix g = G_sx.outputSX(); // Numeric hessian bool f_num_hess = F_sx.getOption("numeric_hessian"); bool g_num_hess = G_sx.getOption("numeric_hessian"); // Number of derivative directions int f_num_fwd = F_sx.getOption("number_of_fwd_dir"); int g_num_fwd = G_sx.getOption("number_of_fwd_dir"); int f_num_adj = F_sx.getOption("number_of_adj_dir"); int g_num_adj = G_sx.getOption("number_of_adj_dir"); // Substitute symbolic variables in f if different input variables from g if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){ f = substitute(f,F_sx.inputSX(),G_sx.inputSX()); } // Lagrange multipliers SXMatrix lam = ssym("lambda",g.size1()); // Objective function scaling SXMatrix sigma = ssym("sigma"); // Lagrangian function vector<SXMatrix> lfcn_in(parametric_? 
4: 3); lfcn_in[0] = G_sx.inputSX(); lfcn_in[1] = lam; lfcn_in[2] = sigma; if (parametric_) lfcn_in[3] = G_sx.inputSX(1); SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g)); lfcn.setOption("verbose",getOption("verbose")); lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess); lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd)); lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj)); lfcn.init(); // Hessian of the Lagrangian H_ = static_cast<FX&>(lfcn).hessian(); H_.setOption("verbose",getOption("verbose")); log("SX Hessian function generated"); } else { // !F_sx.isNull() && !G_sx.isNull() // Check if the functions are SXFunctions MXFunction F_mx = shared_cast<MXFunction>(F_); MXFunction G_mx = shared_cast<MXFunction>(G_); // If they are, check if the arguments are the same if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){ casadi_warning("Exact Hessian calculation for MX is still experimental"); // Expression for f and g MX f = F_mx.outputMX(); MX g = G_mx.outputMX(); // Lagrange multipliers MX lam("lam",g.size1()); // Objective function scaling MX sigma("sigma"); // Inputs of the Lagrangian function vector<MX> lfcn_in(parametric_? 
4:3); lfcn_in[0] = G_mx.inputMX(); lfcn_in[1] = lam; lfcn_in[2] = sigma; if (parametric_) lfcn_in[3] = G_mx.inputMX(1); // Lagrangian function MXFunction lfcn(lfcn_in,sigma*f+ inner_prod(lam,g)); lfcn.init(); log("SX Lagrangian function generated"); /* cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl;*/ bool adjoint_mode = true; if(adjoint_mode){ // Gradient of the lagrangian MX gL = lfcn.grad(); log("MX Lagrangian gradient generated"); MXFunction glfcn(lfcn_in,gL); glfcn.init(); log("MX Lagrangian gradient function initialized"); // cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl; // Get Hessian sparsity CRSSparsity H_sp = glfcn.jacSparsity(); log("MX Lagrangian Hessian sparsity determined"); // Uni-directional coloring (note, the hessian is symmetric) CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp); log("MX Lagrangian Hessian coloring determined"); // Number of colors needed is the number of rows int nfwd_glfcn = coloring.size1(); log("MX Lagrangian gradient function number of sensitivity directions determined"); glfcn.setOption("number_of_fwd_dir",nfwd_glfcn); glfcn.updateNumSens(); log("MX Lagrangian gradient function number of sensitivity directions updated"); // Hessian of the Lagrangian H_ = glfcn.jacobian(); } else { // Hessian of the Lagrangian H_ = lfcn.hessian(); } log("MX Lagrangian Hessian function generated"); } else { casadi_assert_message(0, "Automatic calculation of exact Hessian currently only for F and G both SXFunction or MXFunction "); } } // !F_sx.isNull() && !G_sx.isNull() } // G_.isNull() } // generate_hessian && H_.isNull() if(!H_.isNull() && !H_.isInit()) { H_.init(); log("Hessian function initialized"); } // Create a Jacobian if it does not already exists bool generate_jacobian = getOption("generate_jacobian"); if(generate_jacobian && !G_.isNull() && J_.isNull()){ log("Generating Jacobian"); J_ = G_.jacobian(); // Use live variables if SXFunction 
if(!shared_cast<SXFunction>(J_).isNull()){ J_.setOption("live_variables",true); } log("Jacobian function generated"); } if(!J_.isNull() && !J_.isInit()){ J_.init(); log("Jacobian function initialized"); } if(!H_.isNull()) { if (parametric_) { casadi_assert_message(H_.getNumInputs()>=2, "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got " << G_.getNumInputs()); } else { casadi_assert_message(H_.getNumInputs()>=1, "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option."); } casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H"); casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions"); casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions"); casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions"); } if(!J_.isNull()){ if (parametric_) { casadi_assert_message(J_.getNumInputs()==2, "Wrong number of input arguments to J for parametric NLP. Must be at least 2, but got " << G_.getNumInputs()); } else { casadi_assert_message(J_.getNumInputs()==1, "Wrong number of input arguments to J for non-parametric NLP. Must be at least 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option."); } casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J"); casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions"); casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions"); } if (parametric_) { sp_p = F_->input(1).sparsity(); if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. 
F has got " << sp_p.dimString() << " as dimensions, while G has got " << G_->input(G_->getNumInputs()-1).dimString()); if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while H has got " << H_->input(H_->getNumInputs()-1).dimString()); if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has got " << sp_p.dimString() << " as dimensions, while J has got " << J_->input(J_->getNumInputs()-1).dimString()); } // Infinity double inf = numeric_limits<double>::infinity(); // Allocate space for inputs input_.resize(NLP_NUM_IN - (parametric_? 0 : 1)); input(NLP_X_INIT) = DMatrix(n_,1,0); input(NLP_LBX) = DMatrix(n_,1,-inf); input(NLP_UBX) = DMatrix(n_,1, inf); input(NLP_LBG) = DMatrix(m_,1,-inf); input(NLP_UBG) = DMatrix(m_,1, inf); input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0); if (parametric_) input(NLP_P) = DMatrix(sp_p,0); // Allocate space for outputs output_.resize(NLP_NUM_OUT); output(NLP_X_OPT) = DMatrix(n_,1,0); output(NLP_COST) = DMatrix(1,1,0); output(NLP_LAMBDA_X) = DMatrix(n_,1,0); output(NLP_LAMBDA_G) = DMatrix(m_,1,0); output(NLP_G) = DMatrix(m_,1,0); if (hasSetOption("iteration_callback")) { callback_ = getOption("iteration_callback"); if (!callback_.isNull()) { if (!callback_.isInit()) callback_.init(); casadi_assert_message(callback_.getNumOutputs()==1, "Callback function should have one output, a scalar that indicates wether to break. 0 = continue"); casadi_assert_message(callback_.output(0).size()==1, "Callback function should have one output, a scalar that indicates wether to break. 0 = continue"); casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT, "Callback function should have the output scheme of NLPSolver as input scheme. i.e. 
" <<NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided." ); for (int i=0;i<NLP_NUM_OUT;i++) { casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(), "Callback function should have the output scheme of NLPSolver as input scheme. " << "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) << " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be " << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << "." ); callback_.input(i).setAll(0); } } } callback_step_ = getOption("iteration_callback_step"); // Call the initialization method of the base class FXInternal::init(); }
void SdqpToSdp::init() { // Initialize the base classes SdqpSolverInternal::init(); cholesky_ = LinearSolver("cholesky", "csparsecholesky", st_[SDQP_STRUCT_H]); MX g_socp = MX::sym("x", cholesky_.getFactorizationSparsity(true)); MX h_socp = MX::sym("h", n_); MX f_socp = sqrt(inner_prod(h_socp, h_socp)); MX en_socp = 0.5/f_socp; MX f_sdqp = MX::sym("f", input(SDQP_SOLVER_F).sparsity()); MX g_sdqp = MX::sym("g", input(SDQP_SOLVER_G).sparsity()); std::vector<MX> fi(n_+1); MX znp = MX(n_+1, n_+1); for (int k=0;k<n_;++k) { MX gk = vertcat(g_socp(ALL, k), MX(1, 1)); MX fk = -blockcat(znp, gk, gk.T(), MX(1, 1)); // TODO(Joel): replace with ALL fi.push_back(diagcat(f_sdqp(ALL, Slice(f_sdqp.size1()*k, f_sdqp.size1()*(k+1))), fk)); } MX fin = en_socp*DMatrix::eye(n_+2); fin(n_, n_+1) = en_socp; fin(n_+1, n_) = en_socp; fi.push_back(diagcat(DMatrix(f_sdqp.size1(), f_sdqp.size1()), -fin)); MX h0 = vertcat(h_socp, DMatrix(1, 1)); MX g = blockcat(f_socp*DMatrix::eye(n_+1), h0, h0.T(), f_socp); g = diagcat(g_sdqp, g); Dict opts; opts["input_scheme"] = IOScheme("g_socp", "h_socp", "f_sdqp", "g_sdqp"); opts["output_scheme"] = IOScheme("f", "g"); mapping_ = MXFunction("mapping", make_vector(g_socp, h_socp, f_sdqp, g_sdqp), make_vector(horzcat(fi), g), opts); Dict options; if (hasSetOption(optionsname())) options = getOption(optionsname()); // Create an SdpSolver instance solver_ = SdpSolver("sdpsolver", getOption(solvername()), make_map("g", mapping_.output("g").sparsity(), "f", mapping_.output("f").sparsity(), "a", horzcat(input(SDQP_SOLVER_A).sparsity(), Sparsity(nc_, 1))), options); solver_.input(SDP_SOLVER_C).at(n_)=1; // Output arguments obuf_.resize(SDQP_SOLVER_NUM_OUT); output(SDQP_SOLVER_X) = DMatrix::zeros(n_, 1); std::vector<int> r = range(input(SDQP_SOLVER_G).size1()); output(SDQP_SOLVER_P) = solver_.output(SDP_SOLVER_P).isempty() ? DMatrix() : solver_.output(SDP_SOLVER_P)(r, r); output(SDQP_SOLVER_DUAL) = solver_.output(SDP_SOLVER_DUAL).isempty() ? 
DMatrix() : solver_.output(SDP_SOLVER_DUAL)(r, r); output(SDQP_SOLVER_COST) = 0.0; output(SDQP_SOLVER_DUAL_COST) = 0.0; output(SDQP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1); output(SDQP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1); }
void SQPInternal::init(){ // Call the init method of the base class NLPSolverInternal::init(); // Read options maxiter_ = getOption("maxiter"); maxiter_ls_ = getOption("maxiter_ls"); c1_ = getOption("c1"); beta_ = getOption("beta"); merit_memsize_ = getOption("merit_memory"); lbfgs_memory_ = getOption("lbfgs_memory"); tol_pr_ = getOption("tol_pr"); tol_du_ = getOption("tol_du"); regularize_ = getOption("regularize"); if(getOption("hessian_approximation")=="exact") hess_mode_ = HESS_EXACT; else if(getOption("hessian_approximation")=="limited-memory") hess_mode_ = HESS_BFGS; if (hess_mode_== HESS_EXACT && H_.isNull()) { if (!getOption("generate_hessian")){ casadi_error("SQPInternal::evaluate: you set option 'hessian_approximation' to 'exact', but no hessian was supplied. Try with option \"generate_hessian\"."); } } // If the Hessian is generated, we use exact approximation by default if (bool(getOption("generate_hessian"))){ setOption("hessian_approximation", "exact"); } // Allocate a QP solver CRSSparsity H_sparsity = hess_mode_==HESS_EXACT ? H_.output().sparsity() : sp_dense(n_,n_); H_sparsity = H_sparsity + DMatrix::eye(n_).sparsity(); CRSSparsity A_sparsity = J_.isNull() ? 
CRSSparsity(0,n_,false) : J_.output().sparsity(); QPSolverCreator qp_solver_creator = getOption("qp_solver"); qp_solver_ = qp_solver_creator(H_sparsity,A_sparsity); // Set options if provided if(hasSetOption("qp_solver_options")){ Dictionary qp_solver_options = getOption("qp_solver_options"); qp_solver_.setOption(qp_solver_options); } qp_solver_.init(); // Lagrange multipliers of the NLP mu_.resize(m_); mu_x_.resize(n_); // Lagrange gradient in the next iterate gLag_.resize(n_); gLag_old_.resize(n_); // Current linearization point x_.resize(n_); x_cand_.resize(n_); x_old_.resize(n_); // Constraint function value gk_.resize(m_); gk_cand_.resize(m_); // Hessian approximation Bk_ = DMatrix(H_sparsity); // Jacobian Jk_ = DMatrix(A_sparsity); // Bounds of the QP qp_LBA_.resize(m_); qp_UBA_.resize(m_); qp_LBX_.resize(n_); qp_UBX_.resize(n_); // QP solution dx_.resize(n_); qp_DUAL_X_.resize(n_); qp_DUAL_A_.resize(m_); // Gradient of the objective gf_.resize(n_); // Create Hessian update function if(hess_mode_ == HESS_BFGS){ // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old SXMatrix Bk = ssym("Bk",H_sparsity); SXMatrix x = ssym("x",input(NLP_X_INIT).sparsity()); SXMatrix x_old = ssym("x",x.sparsity()); SXMatrix gLag = ssym("gLag",x.sparsity()); SXMatrix gLag_old = ssym("gLag_old",x.sparsity()); SXMatrix sk = x - x_old; SXMatrix yk = gLag - gLag_old; SXMatrix qk = mul(Bk, sk); // Calculating theta SXMatrix skBksk = inner_prod(sk, qk); SXMatrix omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk), 0.8 * skBksk / (skBksk - inner_prod(sk, yk)), 1); yk = omega * yk + (1 - omega) * qk; SXMatrix theta = 1. / inner_prod(sk, yk); SXMatrix phi = 1. 
/ inner_prod(qk, sk); SXMatrix Bk_new = Bk + theta * mul(yk, trans(yk)) - phi * mul(qk, trans(qk)); // Inputs of the BFGS update function vector<SXMatrix> bfgs_in(BFGS_NUM_IN); bfgs_in[BFGS_BK] = Bk; bfgs_in[BFGS_X] = x; bfgs_in[BFGS_X_OLD] = x_old; bfgs_in[BFGS_GLAG] = gLag; bfgs_in[BFGS_GLAG_OLD] = gLag_old; bfgs_ = SXFunction(bfgs_in,Bk_new); bfgs_.setOption("number_of_fwd_dir",0); bfgs_.setOption("number_of_adj_dir",0); bfgs_.init(); // Initial Hessian approximation B_init_ = DMatrix::eye(n_); } // Header if(bool(getOption("print_header"))){ cout << "-------------------------------------------" << endl; cout << "This is CasADi::SQPMethod." << endl; switch (hess_mode_) { case HESS_EXACT: cout << "Using exact Hessian" << endl; break; case HESS_BFGS: cout << "Using limited memory BFGS Hessian approximation" << endl; break; } cout << endl; cout << "Number of variables: " << setw(9) << n_ << endl; cout << "Number of constraints: " << setw(9) << m_ << endl; cout << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.size() << endl; cout << "Number of nonzeros in Lagrangian Hessian: " << setw(9) << H_sparsity.size() << endl; cout << endl; } }
void AcadoOCPInternal::init(){ // Initialize the functions and get dimensions ffcn_.init(); // Get dimensions nt_ = ffcn_.f_.input(ACADO_FCN_T).numel(); nxd_ = ffcn_.f_.input(ACADO_FCN_XD).numel(); nxa_ = ffcn_.f_.input(ACADO_FCN_XA).numel(); nx_ = nxd_ + nxa_; nu_ = ffcn_.f_.input(ACADO_FCN_U).numel(); np_ = ffcn_.f_.input(ACADO_FCN_P).numel(); nxdot_ = ffcn_.f_.input(ACADO_FCN_XDOT).numel(); // Objective mfcn_.init(); // Path constraints if(!cfcn_.f_.isNull()){ cfcn_.init(); nc_ = cfcn_.f_.output().numel(); } else { nc_ = 0; } // Initial constraint if(!rfcn_.f_.isNull()){ rfcn_.init(); nr_ = rfcn_.f_.output().numel(); } else { nr_ = 0; } // Print: cout << "nt = " << nt_ << endl; cout << "nxd = " << nxd_ << endl; cout << "nxa = " << nxa_ << endl; cout << "nxa = " << nxa_ << endl; cout << "nu = " << nu_ << endl; cout << "np = " << np_ << endl; cout << "nxdot = " << nxdot_ << endl; cout << "nr = " << nr_ << endl; cout << "nc = " << nc_ << endl; // Number of shooting nodes n_nodes_ = getOption("number_of_shooting_nodes").toInt(); // Input dimensions setNumInputs(ACADO_NUM_IN); input(ACADO_X_GUESS) = DMatrix(nx_,n_nodes_+1,0); input(ACADO_U_GUESS) = DMatrix(nu_,n_nodes_+1,0); input(ACADO_P_GUESS) = DMatrix(np_,1,0); input(ACADO_LBX) = DMatrix(nx_,1,0); input(ACADO_UBX) = DMatrix(nx_,1,0); input(ACADO_LBX0) = DMatrix(nx_,1,0); input(ACADO_UBX0) = DMatrix(nx_,1,0); input(ACADO_LBXF) = DMatrix(nx_,1,0); input(ACADO_UBXF) = DMatrix(nx_,1,0); input(ACADO_LBU) = DMatrix(nu_,1,0); input(ACADO_UBU) = DMatrix(nu_,1,0); input(ACADO_LBP) = DMatrix(np_,1,0); input(ACADO_UBP) = DMatrix(np_,1,0); input(ACADO_LBC) = DMatrix(nc_,1,0); input(ACADO_UBC) = DMatrix(nc_,1,0); input(ACADO_LBR) = DMatrix(nr_,1,0); input(ACADO_UBR) = DMatrix(nr_,1,0); // Output dimensions setNumOutputs(ACADO_NUM_OUT); output(ACADO_X_OPT) = DMatrix(nx_,n_nodes_+1,0); output(ACADO_U_OPT) = DMatrix(nu_,n_nodes_+1,0); output(ACADO_P_OPT) = DMatrix(np_,1,0); output(ACADO_COST) = DMatrix(1,1,0); // Initialize 
FXInternal::init(); // Initialize the user_provided integrators for(vector<Integrator>::iterator it=integrators_.begin(); it!=integrators_.end(); ++it) it->init(); // Set all bounds except initial constraints to +- infinity by default for(int i=ACADO_LBX; i<ACADO_UBC; i = i+2){ input(i).setAll(-numeric_limits<double>::infinity()); input(i+1).setAll(numeric_limits<double>::infinity()); } // variables t_ = new ACADO::TIME(); xd_ = new ACADO::DifferentialState[nxd_]; xa_ = new ACADO::AlgebraicState[nxa_]; u_ = new ACADO::Control[nu_]; p_ = new ACADO::Parameter[np_]; xdot_ = new ACADO::DifferentialStateDerivative[nxdot_]; f_ = new ACADO::DifferentialEquation(); // Augmented state vector arg_ = new ACADO::IntermediateState(nt_+nxd_+nxa_+nu_+np_+nxdot_); int ind=0; for(int i=0; i<nt_; ++i) (*arg_)(ind++) = t_[i]; for(int i=0; i<nxd_; ++i) (*arg_)(ind++) = xd_[i]; for(int i=0; i<nxa_; ++i) (*arg_)(ind++) = xa_[i]; for(int i=0; i<nu_; ++i) (*arg_)(ind++) = u_[i]; for(int i=0; i<np_; ++i) (*arg_)(ind++) = p_[i]; for(int i=0; i<nxdot_; ++i) (*arg_)(ind++) = xdot_[i]; // Create an ocp object double t_start = getOption("start_time").toDouble(); double t_final = getOption("final_time").toDouble(); ocp_ = new ACADO::OCP(t_start, t_final, n_nodes_); // Pass objective function ocp_->minimizeMayerTerm( (*mfcn_.fcn_)(*arg_) ); // Pass dynamic equation ocp_->subjectTo( *f_ << (*ffcn_.fcn_)(*arg_) ); }
// Appends a vector at the given time instant: the vector is stored as a
// single-column matrix and the work is forwarded to the matrix overload.
returnValue VariablesGrid::addVector( const DVector& newVector, double newTime )
{
    const DMatrix newAsMatrix( newVector );
    return MatrixVariablesGrid::addMatrix( newAsMatrix, newTime );
}
// Sparsity pattern of a matrix product: multiply all-ones matrices carrying
// the two patterns and return the pattern of the result.
CRSSparsity mul(const CRSSparsity& a, const CRSSparsity &b) {
  DMatrix a_ones(a, 1);
  DMatrix b_ones(b, 1);
  DMatrix product = mul(a_ones, b_ones);
  return product.sparsity();
}
/// Initialize the lifted SQP method: splits the NLP variables into
/// independent (u) and lifted/dependent (v) parts, builds the symbolic
/// residual, linearization and step-expansion functions, and allocates
/// the QP solver and work matrices.
void LiftedSQPInternal::init(){
  // Call the init method of the base class
  NlpSolverInternal::init();

  // Number of lifted variables
  nv = getOption("num_lifted");
  if(verbose_){
    cout << "Initializing SQP method with " << nx_ << " variables and " << ng_ << " constraints." << endl;
    cout << "Lifting " << nv << " variables." << endl;
    if(gauss_newton_){
      cout << "Gauss-Newton objective with " << F_.input().numel() << " terms." << endl;
    }
  }

  // Read options
  max_iter_ = getOption("max_iter");
  max_iter_ls_ = getOption("max_iter_ls");
  toldx_ = getOption("toldx");
  tolgl_ = getOption("tolgl");
  sigma_ = getOption("sigma");
  rho_ = getOption("rho");
  mu_safety_ = getOption("mu_safety");
  eta_ = getOption("eta");
  tau_ = getOption("tau");

  // Assume SXFunction for now
  SXFunction ffcn = shared_cast<SXFunction>(F_);
  casadi_assert(!ffcn.isNull());
  SXFunction gfcn = shared_cast<SXFunction>(G_);
  casadi_assert(!gfcn.isNull());

  // Extract the free variables and split into independent and dependent variables
  SX x = ffcn.inputExpr(0);
  int nx = x.size();
  nu = nx-nv;                 // number of independent variables
  SX u = x[Slice(0,nu)];      // independent variables
  SX v = x[Slice(nu,nu+nv)];  // lifted (dependent) variables

  // Extract the constraint equations and split into constraints and definitions of dependent variables
  SX f1 = ffcn.outputExpr(0);
  int nf1 = f1.numel();
  SX g = gfcn.outputExpr(0);
  int nf2 = g.numel()-nv;
  SX v_eq = g(Slice(0,nv));      // definitions of the lifted variables
  SX f2 = g(Slice(nv,nv+nf2));   // remaining nonlinear constraints

  // Definition of v
  SX v_def = v_eq + v;

  // Objective function
  SX f;

  // Multipliers
  SX lam_x, lam_g, lam_f2;
  if(gauss_newton_){
    // Least square objective
    f = inner_prod(f1,f1)/2;
  } else {
    // Scalar objective function
    f = f1;

    // Lagrange multipliers for the simple bounds on u
    SX lam_u = ssym("lam_u",nu);

    // Lagrange multipliers for the simple bounds on v
    SX lam_v = ssym("lam_v",nv);

    // Lagrange multipliers for the simple bounds on x
    lam_x = vertcat(lam_u,lam_v);

    // Lagrange multipliers corresponding to the definition of the dependent variables
    SX lam_v_eq = ssym("lam_v_eq",nv);

    // Lagrange multipliers for the nonlinear constraints that aren't eliminated
    lam_f2 = ssym("lam_f2",nf2);
    if(verbose_){
      cout << "Allocated intermediate variables." << endl;
    }

    // Lagrange multipliers for constraints
    lam_g = vertcat(lam_v_eq,lam_f2);

    // Lagrangian function
    SX lag = f + inner_prod(lam_x,x);
    if(!f2.empty()) lag += inner_prod(lam_f2,f2);
    if(!v.empty()) lag += inner_prod(lam_v_eq,v_def);

    // Gradient of the Lagrangian
    SX lgrad = casadi::gradient(lag,x);
    // Put here to ensure that lgrad is of the form "h_extended -v_extended"
    if(!v.empty()) lgrad -= vertcat(SX::zeros(nu),lam_v_eq);
    makeDense(lgrad);
    if(verbose_){
      cout << "Generated the gradient of the Lagrangian." << endl;
    }

    // Condensed gradient of the Lagrangian
    f1 = lgrad[Slice(0,nu)];
    nf1 = nu;

    // Gradient of h
    SX v_eq_grad = lgrad[Slice(nu,nu+nv)];

    // Reverse lam_v_eq and v_eq_grad
    SX v_eq_grad_reversed = v_eq_grad;
    copy(v_eq_grad.rbegin(),v_eq_grad.rend(),v_eq_grad_reversed.begin());
    SX lam_v_eq_reversed = lam_v_eq;
    copy(lam_v_eq.rbegin(),lam_v_eq.rend(),lam_v_eq_reversed.begin());

    // Augment h and lam_v_eq
    v_eq.append(v_eq_grad_reversed);
    v.append(lam_v_eq_reversed);
  }

  // Residual function G: maps (x, multipliers) -> (lifted residuals, constraints, objective)
  SXVector G_in(G_NUM_IN);
  G_in[G_X] = x;
  G_in[G_LAM_X] = lam_x;
  G_in[G_LAM_G] = lam_g;
  SXVector G_out(G_NUM_OUT);
  G_out[G_D] = v_eq;
  G_out[G_G] = g;
  G_out[G_F] = f;
  rfcn_ = SXFunction(G_in,G_out);
  rfcn_.setOption("number_of_fwd_dir",0);
  rfcn_.setOption("number_of_adj_dir",0);
  rfcn_.setOption("live_variables",true);
  rfcn_.init();
  if(verbose_){
    cout << "Generated residual function ( " << shared_cast<SXFunction>(rfcn_).getAlgorithmSize() << " nodes)." << endl;
  }

  // Difference vector d
  SX d = ssym("d",nv);
  if(!gauss_newton_){
    vector<SX> dg = ssym("dg",nv).data();
    reverse(dg.begin(),dg.end());
    d.append(dg);
  }

  // Substitute out the v from the h
  SX d_def = (v_eq + v)-d;
  SXVector ex(3);
  ex[0] = f1;
  ex[1] = f2;
  ex[2] = f;
  substituteInPlace(v, d_def, ex, false);
  SX f1_z = ex[0];
  SX f2_z = ex[1];
  SX f_z = ex[2];

  // Modified function Z: functions of (u, d, multipliers) with v eliminated
  enum ZIn{Z_U,Z_D,Z_LAM_X,Z_LAM_F2,Z_NUM_IN};
  SXVector zfcn_in(Z_NUM_IN);
  zfcn_in[Z_U] = u;
  zfcn_in[Z_D] = d;
  zfcn_in[Z_LAM_X] = lam_x;
  zfcn_in[Z_LAM_F2] = lam_f2;
  enum ZOut{Z_D_DEF,Z_F12,Z_NUM_OUT};
  SXVector zfcn_out(Z_NUM_OUT);
  zfcn_out[Z_D_DEF] = d_def;
  zfcn_out[Z_F12] = vertcat(f1_z,f2_z);
  SXFunction zfcn(zfcn_in,zfcn_out);
  zfcn.init();
  if(verbose_){
    cout << "Generated reconstruction function ( " << zfcn.getAlgorithmSize() << " nodes)." << endl;
  }

  // Matrix A and B in lifted Newton
  SX B = zfcn.jac(Z_U,Z_F12);
  SX B1 = B(Slice(0,nf1),Slice(0,B.size2()));
  SX B2 = B(Slice(nf1,B.size1()),Slice(0,B.size2()));
  if(verbose_){
    cout << "Formed B1 (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros) " << "and B2 (dimension " << B2.size1() << "-by-" << B2.size2() << ", "<< B2.size() << " nonzeros)." << endl;
  }

  // Step in u
  SX du = ssym("du",nu);
  SX dlam_f2 = ssym("dlam_f2",lam_f2.sparsity());

  // Right-hand sides of the condensed QP
  SX b1 = f1_z;
  SX b2 = f2_z;
  SX e;
  if(nv > 0){
    // Directional derivative of Z
    vector<vector<SX> > Z_fwdSeed(2,zfcn_in);
    vector<vector<SX> > Z_fwdSens(2,zfcn_out);
    vector<vector<SX> > Z_adjSeed;
    vector<vector<SX> > Z_adjSens;
    // Seed 0: correction for the residual d only
    Z_fwdSeed[0][Z_U].setZero();
    Z_fwdSeed[0][Z_D] = -d;
    Z_fwdSeed[0][Z_LAM_X].setZero();
    Z_fwdSeed[0][Z_LAM_F2].setZero();
    // Seed 1: full step (du, -d, dlam_f2)
    Z_fwdSeed[1][Z_U] = du;
    Z_fwdSeed[1][Z_D] = -d;
    Z_fwdSeed[1][Z_LAM_X].setZero();
    Z_fwdSeed[1][Z_LAM_F2] = dlam_f2;
    zfcn.eval(zfcn_in,zfcn_out,Z_fwdSeed,Z_fwdSens,Z_adjSeed,Z_adjSens);
    b1 += Z_fwdSens[0][Z_F12](Slice(0,nf1));
    b2 += Z_fwdSens[0][Z_F12](Slice(nf1,B.size1()));
    e = Z_fwdSens[1][Z_D_DEF];  // step expansion expression
  }
  if(verbose_){
    cout << "Formed b1 (dimension " << b1.size1() << "-by-" << b1.size2() << ", "<< b1.size() << " nonzeros) " << "and b2 (dimension " << b2.size1() << "-by-" << b2.size2() << ", "<< b2.size() << " nonzeros)." << endl;
  }

  // Generate Gauss-Newton Hessian
  if(gauss_newton_){
    b1 = mul(trans(B1),b1);
    B1 = mul(trans(B1),B1);
    if(verbose_){
      cout << "Gauss Newton Hessian (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros)." << endl;
    }
  }

  // Make sure b1 and b2 are dense vectors
  makeDense(b1);
  makeDense(b2);

  // Quadratic approximation
  SXVector lfcn_in(LIN_NUM_IN);
  lfcn_in[LIN_X] = x;
  lfcn_in[LIN_D] = d;
  lfcn_in[LIN_LAM_X] = lam_x;
  lfcn_in[LIN_LAM_G] = lam_g;
  SXVector lfcn_out(LIN_NUM_OUT);
  lfcn_out[LIN_F1] = b1;
  lfcn_out[LIN_J1] = B1;
  lfcn_out[LIN_F2] = b2;
  lfcn_out[LIN_J2] = B2;
  lfcn_ = SXFunction(lfcn_in,lfcn_out);
  // lfcn_.setOption("verbose",true);
  lfcn_.setOption("number_of_fwd_dir",0);
  lfcn_.setOption("number_of_adj_dir",0);
  lfcn_.setOption("live_variables",true);
  lfcn_.init();
  if(verbose_){
    cout << "Generated linearization function ( " << shared_cast<SXFunction>(lfcn_).getAlgorithmSize() << " nodes)." << endl;
  }

  // Step expansion: recover the step in the lifted variables
  SXVector efcn_in(EXP_NUM_IN);
  copy(lfcn_in.begin(),lfcn_in.end(),efcn_in.begin());
  efcn_in[EXP_DU] = du;
  efcn_in[EXP_DLAM_F2] = dlam_f2;
  efcn_ = SXFunction(efcn_in,e);
  efcn_.setOption("number_of_fwd_dir",0);
  efcn_.setOption("number_of_adj_dir",0);
  efcn_.setOption("live_variables",true);
  efcn_.init();
  if(verbose_){
    cout << "Generated step expansion function ( " << shared_cast<SXFunction>(efcn_).getAlgorithmSize() << " nodes)." << endl;
  }

  // Current guess for the primal solution
  DMatrix &x_k = output(NLP_SOLVER_X);

  // Current guess for the dual solution
  DMatrix &lam_x_k = output(NLP_SOLVER_LAM_X);
  DMatrix &lam_g_k = output(NLP_SOLVER_LAM_G);

  // Allocate a QP solver
  QpSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(B1.sparsity(),B2.sparsity());

  // Set options if provided
  if(hasSetOption("qp_solver_options")){
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }

  // Initialize the QP solver
  qp_solver_.init();
  if(verbose_){
    cout << "Allocated QP solver." << endl;
  }

  // Residual
  d_k_ = DMatrix(d.sparsity(),0);

  // Primal step
  dx_k_ = DMatrix(x_k.sparsity());

  // Dual step
  dlam_x_k_ = DMatrix(lam_x_k.sparsity());
  dlam_g_k_ = DMatrix(lam_g_k.sparsity());
}
/// Initialize the SDP solver: read options, detect block-diagonal structure
/// in the aggregate sparsity of G and the Fi blocks, build the symbolic
/// mapping functions used to (un)permute block matrices, and allocate the
/// output matrices.
void SdpSolverInternal::init() {
  // Call the init method of the base class
  FunctionInternal::init();

  calc_p_ = getOption("calc_p");
  calc_dual_ = getOption("calc_dual");
  print_problem_ = getOption("print_problem");

  // Find aggregate sparsity pattern: union of G and every m-by-m block Fi of F
  Sparsity aggregate = input(SDP_SOLVER_G).sparsity();
  for (int i=0;i<n_;++i) {
    aggregate = aggregate + input(SDP_SOLVER_F)(ALL, Slice(i*m_, (i+1)*m_)).sparsity();
  }

  // Detect block diagonal structure in this sparsity pattern.
  // p is the permutation; r holds the nb_+1 block boundary indices.
  std::vector<int> p;
  std::vector<int> r;
  nb_ = aggregate.stronglyConnectedComponents(p, r);
  block_boundaries_.resize(nb_+1);
  std::copy(r.begin(), r.begin()+nb_+1, block_boundaries_.begin());
  block_sizes_.resize(nb_);
  for (int i=0;i<nb_;++i) {
    block_sizes_[i]=r[i+1]-r[i];
  }

  // Make a mapping function from dense blocks to inversely-permuted block diagonal P
  std::vector< SX > full_blocks;
  for (int i=0;i<nb_;++i) {
    full_blocks.push_back(SX::sym("block", block_sizes_[i], block_sizes_[i]));
  }
  Pmapper_ = SXFunction(full_blocks, blkdiag(full_blocks)(lookupvector(p, p.size()), lookupvector(p, p.size())));
  Pmapper_.init();

  if (nb_>0) {
    // Make a mapping function from (G, F) -> (G[p, p]_j, F_i[p, p]j)
    SX G = SX::sym("G", input(SDP_SOLVER_G).sparsity());
    SX F = SX::sym("F", input(SDP_SOLVER_F).sparsity());

    std::vector<SX> in;
    in.push_back(G);
    in.push_back(F);
    // Outputs: nb_ blocks of G followed by nb_ blocks of each of the n_ Fi
    std::vector<SX> out((n_+1)*nb_);
    for (int j=0;j<nb_;++j) {
      out[j] = G(p, p)(Slice(r[j], r[j+1]), Slice(r[j], r[j+1]));
    }
    for (int i=0;i<n_;++i) {
      SX Fi = F(ALL, Slice(i*m_, (i+1)*m_))(p, p);
      for (int j=0;j<nb_;++j) {
        out[(i+1)*nb_+j] = Fi(Slice(r[j], r[j+1]), Slice(r[j], r[j+1]));
      }
    }
    mapping_ = SXFunction(in, out);
    mapping_.init();
  }

  // Output arguments. P and DUAL are only allocated when requested, since
  // they are dense m-by-m and may be expensive for large problems.
  setNumOutputs(SDP_SOLVER_NUM_OUT);
  output(SDP_SOLVER_X) = DMatrix::zeros(n_, 1);
  output(SDP_SOLVER_P) = calc_p_? DMatrix(Pmapper_.output().sparsity(), 0) : DMatrix();
  output(SDP_SOLVER_DUAL) = calc_dual_? DMatrix(Pmapper_.output().sparsity(), 0) : DMatrix();
  output(SDP_SOLVER_COST) = 0.0;
  output(SDP_SOLVER_DUAL_COST) = 0.0;
  output(SDP_SOLVER_LAM_X) = DMatrix::zeros(n_, 1);
  output(SDP_SOLVER_LAM_A) = DMatrix::zeros(nc_, 1);
}
void DirectCollocationInternal::init(){ // Initialize the base classes OCPSolverInternal::init(); // Free parameters currently not supported casadi_assert_message(np_==0, "Not implemented"); // Legendre collocation points double legendre_points[][6] = { {0}, {0,0.500000}, {0,0.211325,0.788675}, {0,0.112702,0.500000,0.887298}, {0,0.069432,0.330009,0.669991,0.930568}, {0,0.046910,0.230765,0.500000,0.769235,0.953090}}; // Radau collocation points double radau_points[][6] = { {0}, {0,1.000000}, {0,0.333333,1.000000}, {0,0.155051,0.644949,1.000000}, {0,0.088588,0.409467,0.787659,1.000000}, {0,0.057104,0.276843,0.583590,0.860240,1.000000}}; // Read options bool use_radau; if(getOption("collocation_scheme")=="radau"){ use_radau = true; } else if(getOption("collocation_scheme")=="legendre"){ use_radau = false; } // Interpolation order deg_ = getOption("interpolation_order"); // All collocation time points double* tau_root = use_radau ? radau_points[deg_] : legendre_points[deg_]; // Size of the finite elements double h = tf_/nk_; // Coefficients of the collocation equation vector<vector<MX> > C(deg_+1,vector<MX>(deg_+1)); // Coefficients of the collocation equation as DMatrix DMatrix C_num = DMatrix(deg_+1,deg_+1,0); // Coefficients of the continuity equation vector<MX> D(deg_+1); // Coefficients of the collocation equation as DMatrix DMatrix D_num = DMatrix(deg_+1,1,0); // Collocation point SXMatrix tau = ssym("tau"); // For all collocation points for(int j=0; j<deg_+1; ++j){ // Construct Lagrange polynomials to get the polynomial basis at the collocation point SXMatrix L = 1; for(int j2=0; j2<deg_+1; ++j2){ if(j2 != j){ L *= (tau-tau_root[j2])/(tau_root[j]-tau_root[j2]); } } SXFunction lfcn(tau,L); lfcn.init(); // Evaluate the polynomial at the final time to get the coefficients of the continuity equation lfcn.setInput(1.0); lfcn.evaluate(); D[j] = lfcn.output(); D_num(j) = lfcn.output(); // Evaluate the time derivative of the polynomial at all collocation points to get 
the coefficients of the continuity equation for(int j2=0; j2<deg_+1; ++j2){ lfcn.setInput(tau_root[j2]); lfcn.setFwdSeed(1.0); lfcn.evaluate(1,0); C[j][j2] = lfcn.fwdSens(); C_num(j,j2) = lfcn.fwdSens(); } } C_num(std::vector<int>(1,0),ALL) = 0; C_num(0,0) = 1; // All collocation time points vector<vector<double> > T(nk_); for(int k=0; k<nk_; ++k){ T[k].resize(deg_+1); for(int j=0; j<=deg_; ++j){ T[k][j] = h*(k + tau_root[j]); } } // Total number of variables int nlp_nx = 0; nlp_nx += nk_*(deg_+1)*nx_; // Collocated states nlp_nx += nk_*nu_; // Parametrized controls nlp_nx += nx_; // Final state // NLP variable vector MX nlp_x = msym("x",nlp_nx); int offset = 0; // Get collocated states and parametrized control vector<vector<MX> > X(nk_+1); vector<MX> U(nk_); for(int k=0; k<nk_; ++k){ // Collocated states X[k].resize(deg_+1); for(int j=0; j<=deg_; ++j){ // Get the expression for the state vector X[k][j] = nlp_x[Slice(offset,offset+nx_)]; offset += nx_; } // Parametrized controls U[k] = nlp_x[Slice(offset,offset+nu_)]; offset += nu_; } // State at end time X[nk_].resize(1); X[nk_][0] = nlp_x[Slice(offset,offset+nx_)]; offset += nx_; casadi_assert(offset==nlp_nx); // Constraint function for the NLP vector<MX> nlp_g; // Objective function MX nlp_j = 0; // For all finite elements for(int k=0; k<nk_; ++k){ // For all collocation points for(int j=1; j<=deg_; ++j){ // Get an expression for the state derivative at the collocation point MX xp_jk = 0; for(int r=0; r<=deg_; ++r){ xp_jk += C[r][j]*X[k][r]; } // Add collocation equations to the NLP MX fk = ffcn_.call(daeIn("x",X[k][j],"p",U[k]))[DAE_ODE]; nlp_g.push_back(h*fk - xp_jk); } // Get an expression for the state at the end of the finite element MX xf_k = 0; for(int r=0; r<=deg_; ++r){ xf_k += D[r]*X[k][r]; } // Add continuity equation to NLP nlp_g.push_back(X[k+1][0] - xf_k); // Add path constraints if(nh_>0){ MX pk = cfcn_.call(daeIn("x",X[k+1][0],"p",U[k])).at(0); nlp_g.push_back(pk); } // Add integral objective 
function term // [Jk] = lfcn.call([X[k+1,0], U[k]]) // nlp_j += Jk } // Add end cost MX Jk = mfcn_.call(mayerIn("x",X[nk_][0])).at(0); nlp_j += Jk; // Objective function of the NLP F_ = MXFunction(nlp_x, nlp_j); // Nonlinear constraint function G_ = MXFunction(nlp_x, vertcat(nlp_g)); // Get the NLP creator function NLPSolverCreator nlp_solver_creator = getOption("nlp_solver"); // Allocate an NLP solver nlp_solver_ = nlp_solver_creator(F_,G_,FX(),FX()); // Pass options if(hasSetOption("nlp_solver_options")){ const Dictionary& nlp_solver_options = getOption("nlp_solver_options"); nlp_solver_.setOption(nlp_solver_options); } // Initialize the solver nlp_solver_.init(); }
CRSSparsity CRSSparsity::operator+(const CRSSparsity& b) const {
  // Union of two patterns: add matrices of ones carrying each pattern and
  // take the pattern of the sum.
  DMatrix lhs(*this, 1);
  DMatrix rhs(b, 1);
  return (lhs + rhs).sparsity();
}
// Construct a variables grid from a single vector: the vector is wrapped in
// a DMatrix and forwarded to the MatrixVariablesGrid base-class constructor.
VariablesGrid::VariablesGrid( const DVector& arg, const Grid& _grid, VariableType _type ) : MatrixVariablesGrid( DMatrix(arg),_grid,_type ) { }
/* IUPred driver: reads a sequence file and a prediction mode
 * ("long", "short" or "glob"), configures the global prediction
 * parameters accordingly, runs the predictor and prints the results. */
int main(int argc, char **argv) {
  P_STR *P;
  SEQ_STR *SEQ;
  int i,j;
  int type;     /* 0 = long, 1 = short, 2 = glob */
  char *path;

  if (argc!=3) {
    printf(" Usage: %s seqfile type \n",argv[0]);
    printf(" where type stands for one of the options of \n");
    printf(" \"long\", \"short\" or \"glob\"\n");
    exit(1);
  }

  /* Data directory is hard-coded; the original env-var lookup is kept
   * below for reference. */
  /*if ((path=getenv("IUPred_PATH"))==NULL) {
    fprintf(stderr,"IUPred_PATH environment variable is not set\n");
    path="./";
  } */
  path="lib/disorder_apps/iupred";

  printf("# IUPred \n");
  printf("# Copyright (c) Zsuzsanna Dosztanyi, 2005\n");
  printf("#\n");
  printf("# Z. Dosztanyi, V. Csizmok, P. Tompa and I. Simon\n");
  printf("# J. Mol. Biol. (2005) 347, 827-839. \n");
  printf("#\n");
  printf("#\n");

  /* NOTE(review): strncmp with fixed length also accepts prefixes such as
   * "longer" -- presumably acceptable for this tool. */
  if ((strncmp(argv[2],"long",4))==0) {
    type=0;
  }
  else if ((strncmp(argv[2],"short",5))==0) {
    type=1;
  }
  else if ((strncmp(argv[2],"glob",4))==0) {
    type=2;
  }
  else {
    printf("Wrong argument\n");exit(1);
  }

  /* NOTE(review): malloc results are not checked for NULL. */
  SEQ=malloc(sizeof(SEQ_STR));
  Get_Seq(argv[1],SEQ);
  if (SEQ->le==0) {printf(" Sequence length 0\n");exit(1);}

#ifdef DEBUG
  printf("%s %d\n%s\n",SEQ->name,SEQ->le,SEQ->seq);
#endif

  P=malloc(sizeof(P_STR));
  P->CC= DMatrix(AAN,AAN);   /* pairwise interaction matrix, AAN x AAN */

  if (type==0) {
    /* "long" disorder prediction */
    LC=1; UC=100; WS=10; Flag_EP=0;
    read_ref(path,"ss",P->CC);
    Get_Histo(P, path, "histo");
    IUPred(SEQ,P);
    printf("# Prediction output \n");
    printf("# %s\n",SEQ->name);
    for (i=0;i<SEQ->le;i++)
      printf("%5d %c %10.4f\n",i+1,SEQ->seq[i],SEQ->en[i]);
  }
  if (type==1) {
    /* "short" disorder prediction (CASP-style parameters) */
    LC=1; UC=25; WS=10; Flag_EP=1; EP=-1.26;
    read_ref(path,"ss_casp",P->CC);
    Get_Histo(P, path, "histo_casp");
    IUPred(SEQ,P);
    printf("# Prediction output \n");
    printf("# %s\n",SEQ->name);
    for (i=0;i<SEQ->le;i++)
      printf("%5d %c %10.4f\n",i+1,SEQ->seq[i],SEQ->en[i]);
  }
  if (type==2) {
    /* "glob": globular-domain detection; domains are upper-cased in the
     * echoed sequence, everything else lower-cased */
    char *globseq;
    LC=1; UC=100; WS=15; Flag_EP=0;
    read_ref(path,"ss",P->CC);
    Get_Histo(P,path,"histo");
    IUPred(SEQ,P);
    Min_Ene=DMin_Ene; JOIN=DJOIN; DEL=DDEL;
    getRegions(SEQ);
    globseq=malloc((SEQ->le+1)*sizeof(char));
    for (i=0;i<SEQ->le;i++) globseq[i]=tolower(SEQ->seq[i]);
    printf("# Prediction output \n");
    printf("# %s\n",SEQ->name);
    printf("Number of globular domains: %5d \n",SEQ->ngr);
    for (i=0;i<SEQ->ngr;i++) {
      printf(" globular domain %5d. %d - %d \n",
             i+1,SEQ->gr[i][0]+1,SEQ->gr[i][1]+1);
      for (j=SEQ->gr[i][0];j<SEQ->gr[i][1]+1;j++) {
        globseq[j]=toupper(globseq[j]);
      }
    }
    /* FASTA-style echo, 60 residues per line in blocks of 10 */
    printf(">%s\n",SEQ->name);
    for (i=0;i<SEQ->le;i++) {
      if ((i>0)&&(i%60==0)) printf("\n");
      else if ((i>0)&&(i%10==0)) printf(" ");
      printf("%c",globseq[i]);
    }
    printf("\n");
    free(globseq);

#ifdef DEBUG
    for (i=0;i<SEQ->le;i++)
      printf("%5d %c %10.4f\n",i,SEQ->seq[i],SEQ->en[i]);
#endif
  }

  /* NOTE(review): P and P->CC are not freed; harmless at process exit. */
  free(SEQ->seq);
  free(SEQ->eprof);free(SEQ->en);free(SEQ->smp);
  free(SEQ);
  return 0;
}
void IpoptInternal::evaluate(){ if (inputs_check_) checkInputs(); checkInitialBounds(); if (gather_stats_) { Dictionary iterations; iterations["inf_pr"] = std::vector<double>(); iterations["inf_du"] = std::vector<double>(); iterations["mu"] = std::vector<double>(); iterations["d_norm"] = std::vector<double>(); iterations["regularization_size"] = std::vector<double>(); iterations["obj"] = std::vector<double>(); iterations["ls_trials"] = std::vector<int>(); iterations["alpha_pr"] = std::vector<double>(); iterations["alpha_du"] = std::vector<double>(); iterations["obj"] = std::vector<double>(); stats_["iterations"] = iterations; } // Reset the counters t_eval_f_ = t_eval_grad_f_ = t_eval_g_ = t_eval_jac_g_ = t_eval_h_ = t_callback_fun_ = t_callback_prepare_ = t_mainloop_ = 0; n_eval_f_ = n_eval_grad_f_ = n_eval_g_ = n_eval_jac_g_ = n_eval_h_ = n_iter_ = 0; // Get back the smart pointers Ipopt::SmartPtr<Ipopt::TNLP> *userclass = static_cast<Ipopt::SmartPtr<Ipopt::TNLP>*>(userclass_); Ipopt::SmartPtr<Ipopt::IpoptApplication> *app = static_cast<Ipopt::SmartPtr<Ipopt::IpoptApplication>*>(app_); double time1 = clock(); // Ask Ipopt to solve the problem Ipopt::ApplicationReturnStatus status = (*app)->OptimizeTNLP(*userclass); double time2 = clock(); t_mainloop_ = double(time2-time1)/CLOCKS_PER_SEC; #ifdef WITH_SIPOPT if(run_sens_ || compute_red_hessian_){ // Calculate parametric sensitivities Ipopt::SmartPtr<Ipopt::SensApplication> *app_sens = static_cast<Ipopt::SmartPtr<Ipopt::SensApplication>*>(app_sens_); (*app_sens)->SetIpoptAlgorithmObjects(*app, status); (*app_sens)->Run(); // Access the reduced Hessian calculator #ifdef WITH_CASADI_PATCH if(compute_red_hessian_){ // Get the reduced Hessian std::vector<double> red_hess = (*app_sens)->ReducedHessian(); // Get the dimensions int N; for(N=0; N*N<red_hess.size(); ++N); casadi_assert(N*N==red_hess.size()); // Store to statistics red_hess_ = DMatrix(Sparsity::dense(N,N),red_hess); } #endif // WITH_CASADI_PATCH } #endif // 
WITH_SIPOPT if (hasOption("print_time") && bool(getOption("print_time"))) { // Write timings cout << "time spent in eval_f: " << t_eval_f_ << " s."; if (n_eval_f_>0) cout << " (" << n_eval_f_ << " calls, " << (t_eval_f_/n_eval_f_)*1000 << " ms. average)"; cout << endl; cout << "time spent in eval_grad_f: " << t_eval_grad_f_ << " s."; if (n_eval_grad_f_>0) cout << " (" << n_eval_grad_f_ << " calls, " << (t_eval_grad_f_/n_eval_grad_f_)*1000 << " ms. average)"; cout << endl; cout << "time spent in eval_g: " << t_eval_g_ << " s."; if (n_eval_g_>0) cout << " (" << n_eval_g_ << " calls, " << (t_eval_g_/n_eval_g_)*1000 << " ms. average)"; cout << endl; cout << "time spent in eval_jac_g: " << t_eval_jac_g_ << " s."; if (n_eval_jac_g_>0) cout << " (" << n_eval_jac_g_ << " calls, " << (t_eval_jac_g_/n_eval_jac_g_)*1000 << " ms. average)"; cout << endl; cout << "time spent in eval_h: " << t_eval_h_ << " s."; if (n_eval_h_>1) cout << " (" << n_eval_h_ << " calls, " << (t_eval_h_/n_eval_h_)*1000 << " ms. average)"; cout << endl; cout << "time spent in main loop: " << t_mainloop_ << " s." << endl; cout << "time spent in callback function: " << t_callback_fun_ << " s." << endl; cout << "time spent in callback preparation: " << t_callback_prepare_ << " s." 
<< endl; } if (status == Solve_Succeeded) stats_["return_status"] = "Solve_Succeeded"; if (status == Solved_To_Acceptable_Level) stats_["return_status"] = "Solved_To_Acceptable_Level"; if (status == Infeasible_Problem_Detected) stats_["return_status"] = "Infeasible_Problem_Detected"; if (status == Search_Direction_Becomes_Too_Small) stats_["return_status"] = "Search_Direction_Becomes_Too_Small"; if (status == Diverging_Iterates) stats_["return_status"] = "Diverging_Iterates"; if (status == User_Requested_Stop) stats_["return_status"] = "User_Requested_Stop"; if (status == Maximum_Iterations_Exceeded) stats_["return_status"] = "Maximum_Iterations_Exceeded"; if (status == Restoration_Failed) stats_["return_status"] = "Restoration_Failed"; if (status == Error_In_Step_Computation) stats_["return_status"] = "Error_In_Step_Computation"; if (status == Not_Enough_Degrees_Of_Freedom) stats_["return_status"] = "Not_Enough_Degrees_Of_Freedom"; if (status == Invalid_Problem_Definition) stats_["return_status"] = "Invalid_Problem_Definition"; if (status == Invalid_Option) stats_["return_status"] = "Invalid_Option"; if (status == Invalid_Number_Detected) stats_["return_status"] = "Invalid_Number_Detected"; if (status == Unrecoverable_Exception) stats_["return_status"] = "Unrecoverable_Exception"; if (status == NonIpopt_Exception_Thrown) stats_["return_status"] = "NonIpopt_Exception_Thrown"; if (status == Insufficient_Memory) stats_["return_status"] = "Insufficient_Memory"; stats_["t_eval_f"] = t_eval_f_; stats_["t_eval_grad_f"] = t_eval_grad_f_; stats_["t_eval_g"] = t_eval_g_; stats_["t_eval_jac_g"] = t_eval_jac_g_; stats_["t_eval_h"] = t_eval_h_; stats_["t_mainloop"] = t_mainloop_; stats_["t_callback_fun"] = t_callback_fun_; stats_["t_callback_prepare"] = t_callback_prepare_; stats_["n_eval_f"] = n_eval_f_; stats_["n_eval_grad_f"] = n_eval_grad_f_; stats_["n_eval_g"] = n_eval_g_; stats_["n_eval_jac_g"] = n_eval_jac_g_; stats_["n_eval_h"] = n_eval_h_; stats_["iter_count"] = 
n_iter_-1; }