FX FX::operator[](int k) const {

  // Argument checking
  if (k<0) k+=getNumOutputs();
  casadi_assert_message(k<getNumOutputs(),
    "FX[int k]:: Attempt to select the k'th output with k=" << k
    << ", but k must be smaller than the number of outputs (" << getNumOutputs() << ").");

  // Get the inputs in MX form
  std::vector< MX > in = symbolicInput();

  // Copy the function so that the non-const call() method can be used from this const method
  FX clone = *this;

  // Get the outputs
  std::vector< MX > result = clone.call(in);

  // Construct an MXFunction with only the k'th output
  MXFunction ret(in,result[k]);
  ret.setInputScheme(getInputScheme());

  // Initialize it
  ret.init();

  // And return it, will automatically cast to FX
  return ret;
}
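
// A minimal usage sketch (not part of the original source; `f` and `x0` are hypothetical).
// operator[] extracts a single output as a stand-alone, already initialized function:
//
//   FX f1 = f[1];              // function computing only output 1 of f
//   f1.setInput(x0);           // same inputs and input scheme as f
//   f1.evaluate();
//   DMatrix y1 = f1.output();
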
void EvaluationMX::evaluateMX(const MXPtrV& arg, MXPtrV& res, const MXPtrVV& fseed,
    MXPtrVV& fsens, const MXPtrVV& aseed, MXPtrVV& asens, bool output_given) {

  // Number of sensitivity directions
  int nfdir = fsens.size();
  casadi_assert(nfdir==0 || fcn_.spCanEvaluate(true));
  int nadir = aseed.size();
  casadi_assert(nadir==0 || fcn_.spCanEvaluate(false));

  // Get/generate the derivative function
  FX d = fcn_.derivative(nfdir, nadir);

  // Temporary
  vector<MX> tmp;

  // Assemble inputs
  vector<MX> d_arg;
  d_arg.reserve(d.getNumInputs());

  // Nondifferentiated inputs
  tmp = getVector(arg);
  d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());
  for (MXPtrVV::const_iterator i = fseed.begin(); i != fseed.end(); ++i) {
    tmp = getVector(*i);
    d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());
  }
  for (MXPtrVV::const_iterator i = aseed.begin(); i != aseed.end(); ++i) {
    tmp = getVector(*i);
    d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());
  }

  // Evaluate symbolically
  vector<MX> d_res = d.call(d_arg);
  vector<MX>::const_iterator d_res_it = d_res.begin();

  // Collect the nondifferentiated results
  for (MXPtrV::iterator i = res.begin(); i != res.end(); ++i, ++d_res_it) {
    if (!output_given && *i) **i = *d_res_it;
  }

  // Collect the forward sensitivities
  for (MXPtrVV::iterator j = fsens.begin(); j != fsens.end(); ++j) {
    for (MXPtrV::iterator i = j->begin(); i != j->end(); ++i, ++d_res_it) {
      if (*i) **i = *d_res_it;
    }
  }

  // Collect the adjoint sensitivities
  for (MXPtrVV::iterator j = asens.begin(); j != asens.end(); ++j) {
    for (MXPtrV::iterator i = j->begin(); i != j->end(); ++i, ++d_res_it) {
      if(*i && !d_res_it->isNull()){
        **i += *d_res_it;
      }
    }
  }

  // Make sure that we've got to the end of the outputs
  casadi_assert(d_res_it==d_res.end());
}
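
// Packing convention assumed for d = fcn_.derivative(nfdir, nadir), mirroring how d_arg is
// assembled and d_res is unpacked above (a sketch for reference, with n_in/n_out the number
// of inputs/outputs of fcn_):
//
//   inputs : [ arg[0..n_in-1],
//              fseed[0][0..n_in-1],  ..., fseed[nfdir-1][0..n_in-1],
//              aseed[0][0..n_out-1], ..., aseed[nadir-1][0..n_out-1] ]
//   outputs: [ res[0..n_out-1],
//              fsens[0][0..n_out-1], ..., fsens[nfdir-1][0..n_out-1],
//              asens[0][0..n_in-1],  ..., asens[nadir-1][0..n_in-1] ]
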
MXFunction vec (const FX &a_) {
  FX a = a_;

  // Return an empty MXFunction if the input is null
  if (a.isNull()) return MXFunction();

  // Get the MX inputs, only used for their shapes
  const std::vector<MX> &symbolicInputMX = a.symbolicInput();

  // Vector of MX with the shape of vec(symbolicInputMX[i])
  std::vector<MX> symbolicInputMX_vec(a.getNumInputs());

  // The vec'ed inputs reshaped back to the original input shapes
  std::vector<MX> symbolicInputMX_vec_reshape(a.getNumInputs());

  // Apply the vec-transformation to the inputs
  for (int i=0;i<symbolicInputMX.size();++i) {
    std::stringstream s;
    s << "X_flat_" << i;
    symbolicInputMX_vec[i] = MX(s.str(),vec(symbolicInputMX[i].sparsity()));
    symbolicInputMX_vec_reshape[i] = trans(reshape(symbolicInputMX_vec[i],trans(symbolicInputMX[i].sparsity())));
  }

  // Call the original function with the vec'ed inputs
  std::vector<MX> symbolicOutputMX = a.call(symbolicInputMX_vec_reshape);

  // Apply the vec-transformation to the outputs
  for (int i=0;i<symbolicOutputMX.size();++i)
    symbolicOutputMX[i] = vec(symbolicOutputMX[i]);

  // Make a new function with the vec'ed inputs/outputs
  MXFunction ret(symbolicInputMX_vec,symbolicOutputMX);

  // Initialize it if a was
  if (a.isInit()) ret.init();
  return ret;
}
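
// A minimal usage sketch (hypothetical names; not part of the original source). vec() wraps a
// function with matrix-valued inputs/outputs into one operating on the flattened vectors:
//
//   MXFunction f_vec = vec(f);           // inputs/outputs of f, flattened with vec()
//   if (!f_vec.isInit()) f_vec.init();   // only initialized if f already was
//   f_vec.setInput(vec(x0));             // x0 flattened to a vector
//   f_vec.evaluate();
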
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");

  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");

  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2,
      "Wrong number of input arguments to F for parametric NLP. Must be 2, but got "
      << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1,
      "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got "
      << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2,
    "Wrong number of input arguments to F. Must be 1 or 2");
  if (F_.getNumInputs()==2) parametric_=true;

  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
    "To avoid confusion, the input argument to F must be a vector. You supplied "
    << F_.input().dimString() << endl
    << " We suggest you make the following changes:" << endl
    << "   - F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl
    << "                          or F -> F = vec(F)" << endl
    << "   - F is an MXFunction:  MXFunction([X],[rhs]) ->" << endl
    << "                          X_flat = MX(\"X\",vec(X.sparsity()))" << endl
    << "                          F = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]])" << endl
    << "                          or F -> F = vec(F)" << endl
    << " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl);

  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_ || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(),
    "Input argument of F must be dense. You supplied " << F_.input().dimString());

  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2,
        "Wrong number of input arguments to G for parametric NLP. Must be 2, but got "
        << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1,
        "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got "
        << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(),
      "F and G input dimension must match. F " << F_.input().dimString()
      << ". G " << G_.input().dimString());
  }

  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");

    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }

      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }

  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");

    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }

      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }

  // Find out if we are to generate the Hessian of the Lagrangian
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported");
    log("generating hessian");

    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();

      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();

      // Lagrange multipliers
      MX lam("lam",0);

      // Objective function scaling
      MX sigma("sigma");

      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);

      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");

    } else { // G_.isNull()

      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);

      // Efficient if both functions are SXFunctions
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expressions for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();

        // Numeric Hessian?
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");

        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");

        // Substitute symbolic variables in f if its input variables differ from those of g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }

        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");

        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4: 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);

        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();

        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");

      } else { // !F_sx.isNull() && !G_sx.isNull()

        // Check if the functions are MXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);

        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");

          // Expressions for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();

          // Lagrange multipliers
          MX lam("lam",g.size1());

          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4:3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
          lfcn.init();
          log("MX Lagrangian function generated");
          /* cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl; */

          bool adjoint_mode = true;
          if(adjoint_mode){
            // Gradient of the Lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");
            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
            // cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");

            // Uni-directional coloring (note, the Hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");

            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {
            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
          }
          log("MX Lagrangian Hessian function generated");

        } else {
          casadi_assert_message(0,
            "Automatic calculation of exact Hessian currently only for F and G both SXFunction or MXFunction");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()

  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if it does not already exist
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();

    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }

  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2,
        "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got "
        << H_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1,
        "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got "
        << H_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2,
        "Wrong number of input arguments to J for parametric NLP. Must be 2, but got "
        << J_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1,
        "Wrong number of input arguments to J for non-parametric NLP. Must be 1, but got "
        << J_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();

    if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString()
      << ", while G has " << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString()
      << ", while H has " << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),
      "Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString()
      << ", while J has " << J_->input(J_->getNumInputs()-1).dimString());
  }

  // Infinity
  double inf = numeric_limits<double>::infinity();

  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);

  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)    = DMatrix(n_,1,0);
  output(NLP_COST)     = DMatrix(1,1,0);
  output(NLP_LAMBDA_X) = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G) = DMatrix(m_,1,0);
  output(NLP_G)        = DMatrix(m_,1,0);

  if (hasSetOption("iteration_callback")) {
    callback_ = getOption("iteration_callback");
    if (!callback_.isNull()) {
      if (!callback_.isInit()) callback_.init();
      casadi_assert_message(callback_.getNumOutputs()==1,
        "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
      casadi_assert_message(callback_.output(0).size()==1,
        "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
      casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT,
        "Callback function should have the output scheme of NLPSolver as input scheme, i.e. "
        << NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided.");
      for (int i=0;i<NLP_NUM_OUT;i++) {
        casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
          "Callback function should have the output scheme of NLPSolver as input scheme. "
          << "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) << " aka '"
          << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be "
          << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << ".");
        callback_.input(i).setAll(0);
      }
    }
  }

  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
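
// A hedged sketch of a conforming iteration callback (hypothetical names, shapes and solver
// instance; based only on the checks above): the callback must take the NLPSolver output scheme
// as its input scheme (NLP_NUM_OUT inputs) and return a single scalar, where 0 means "continue".
//
//   std::vector<MX> cb_in(NLP_NUM_OUT);
//   cb_in[NLP_X_OPT]    = msym("x_opt",n,1);
//   cb_in[NLP_COST]     = msym("cost");
//   cb_in[NLP_LAMBDA_X] = msym("lam_x",n,1);
//   cb_in[NLP_LAMBDA_G] = msym("lam_g",m,1);
//   cb_in[NLP_G]        = msym("g",m,1);
//   MXFunction callback(cb_in,MX(0));   // always returns 0: keep iterating
//   callback.init();
//   solver.setOption("iteration_callback",callback);
//   solver.setOption("iteration_callback_step",5);
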