FX FX::operator[](int k) const {
  // Argument checking
  if (k<0) k += getNumOutputs();
  casadi_assert_message(k<getNumOutputs(),
    "FX[int k]: Attempt to select the k'th output with k=" << k
    << ", but k must be smaller than the number of outputs ("
    << getNumOutputs() << ").");

  // Get the inputs in MX form
  std::vector<MX> in = symbolicInput();

  // Clone such that we can use const
  FX clone = *this;

  // Get the outputs
  std::vector<MX> result = clone.call(in);

  // Construct an MXFunction with only the k'th output
  MXFunction ret(in, result[k]);
  ret.setInputScheme(getInputScheme());

  // Initialize it
  ret.init();

  // And return it; it will automatically be cast to FX
  return ret;
}
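// --- Hypothetical usage sketch (not from the original source) ---
// Shows how operator[] above could be used to extract one output of a
// multiple-output function as a stand-alone function. The function built
// here and the include path are illustrative assumptions.
#include "casadi/casadi.hpp"   // assumed include path
using namespace CasADi;

void operator_index_demo() {
  SXMatrix x = ssym("x", 2);
  std::vector<SXMatrix> outs(2);
  outs[0] = x[0] + x[1];
  outs[1] = x[0] * x[1];
  FX f = SXFunction(std::vector<SXMatrix>(1, x), outs);
  f.init();

  FX f1 = f[1];                                  // function computing only x[0]*x[1]
  f1.setInput(std::vector<double>(2, 3.0));
  f1.evaluate();                                 // f1.output() now holds 9.0
}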
FX* createExternalFunction(const char* const objname) {
  // Note: the caller takes ownership of the returned pointer
  FX* extFun = new ExternalFunction("./" + string(objname));
  extFun->init();
  return extFun;
}
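// --- Hypothetical usage sketch (not from the original source) ---
// Loads a previously compiled shared object and evaluates it; the name
// "fmin.so" is illustrative, and the caller owns the returned pointer.
void external_function_demo() {
  FX* f = createExternalFunction("fmin.so");
  f->setInput(1.0, 0);
  f->evaluate();
  cout << f->output() << endl;
  delete f;
}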
void EvaluationMX::evaluateMX(const MXPtrV& arg, MXPtrV& res,
                              const MXPtrVV& fseed, MXPtrVV& fsens,
                              const MXPtrVV& aseed, MXPtrVV& asens,
                              bool output_given) {
  // Number of sensitivity directions
  int nfdir = fsens.size();
  casadi_assert(nfdir==0 || fcn_.spCanEvaluate(true));
  int nadir = aseed.size();
  casadi_assert(nadir==0 || fcn_.spCanEvaluate(false));

  // Get/generate the derivative function
  FX d = fcn_.derivative(nfdir, nadir);

  // Temporary
  vector<MX> tmp;

  // Assemble inputs
  vector<MX> d_arg;
  d_arg.reserve(d.getNumInputs());

  // Nondifferentiated inputs
  tmp = getVector(arg);
  d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());

  // Forward seeds
  for (MXPtrVV::const_iterator i = fseed.begin(); i != fseed.end(); ++i) {
    tmp = getVector(*i);
    d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());
  }

  // Adjoint seeds
  for (MXPtrVV::const_iterator i = aseed.begin(); i != aseed.end(); ++i) {
    tmp = getVector(*i);
    d_arg.insert(d_arg.end(), tmp.begin(), tmp.end());
  }

  // Evaluate symbolically
  vector<MX> d_res = d.call(d_arg);
  vector<MX>::const_iterator d_res_it = d_res.begin();

  // Collect the nondifferentiated results
  for (MXPtrV::iterator i = res.begin(); i != res.end(); ++i, ++d_res_it) {
    if (!output_given && *i) **i = *d_res_it;
  }

  // Collect the forward sensitivities
  for (MXPtrVV::iterator j = fsens.begin(); j != fsens.end(); ++j) {
    for (MXPtrV::iterator i = j->begin(); i != j->end(); ++i, ++d_res_it) {
      if (*i) **i = *d_res_it;
    }
  }

  // Collect the adjoint sensitivities
  for (MXPtrVV::iterator j = asens.begin(); j != asens.end(); ++j) {
    for (MXPtrV::iterator i = j->begin(); i != j->end(); ++i, ++d_res_it) {
      if (*i && !d_res_it->isNull()) {
        **i += *d_res_it;
      }
    }
  }

  // Make sure that we've reached the end of the outputs
  casadi_assert(d_res_it==d_res.end());
}
EvaluationMX::EvaluationMX(const FX& fcn, const std::vector<MX>& arg) : fcn_(fcn) {
  // Number of inputs and outputs
  int num_in = fcn.getNumInputs();
  int num_out = fcn.getNumOutputs();

  // All dependencies of the function
  vector<MX> d = arg;
  d.resize(num_in);

  setDependencies(d);
  setSparsity(CRSSparsity(1, 1, true));
}
void generateCodeAndCompile(FX fcn, const std::string& name, bool expand) {
  cout << "Generating code for " << name << endl;

  // Convert to an SXFunction (may or may not improve efficiency)
  if (expand && is_a<MXFunction>(fcn)) {
    fcn = SXFunction(shared_cast<MXFunction>(fcn));
    fcn.init();
  }

  // Generate C code
  fcn.generateCode(name + ".c");

  // Compilation command
  string compile_command = "gcc -fPIC -shared -O3 " + name + ".c -o " + name + ".so";

  // Compile the C code
  int flag = system(compile_command.c_str());
  casadi_assert_message(flag==0, "Compilation failed");
}
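// --- Hypothetical usage sketch (not from the original source) ---
// The full code-generation round trip, combining generateCodeAndCompile with
// createExternalFunction above. The name "my_fcn" is an illustrative assumption.
void codegen_demo(FX fcn) {
  generateCodeAndCompile(fcn, "my_fcn", true);   // writes my_fcn.c and my_fcn.so
  FX* loaded = createExternalFunction("my_fcn.so");
  // ... use *loaded like any other initialized FX ...
  delete loaded;
}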
int main(int argc, char* argv[]) {
  if (argc != 3) {
    std::cout << "Usage: " << argv[0] << " <input file> <output file>";
    std::cout << std::endl;
    exit(1);
  }

  SndfileHandle input_file = SndfileHandle(argv[1]);
  int num_samples = (int)input_file.frames();
  int hop_size = 512;
  int frame_size = 2048;
  int num_frames = (num_samples - frame_size) / hop_size;

  FX fx;
  fx.hop_size(hop_size);
  fx.frame_size(frame_size);
  fx.preserve_envelope(true);

  EnvInterp env_interp = EnvInterp(&fx, num_frames);
  fx.add_transformation(&env_interp);

  std::vector<double> input(num_samples);
  std::vector<double> output(num_samples);
  input_file.read(&(input[0]), num_samples);

  for (int n = 0; n < (num_samples - hop_size); n += hop_size) {
    fx.process_frame(hop_size, &(input[n]), hop_size, &(output[n]));
  }

  int output_format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
  int output_channels = 1;
  int output_sampling_rate = 44100;
  SndfileHandle output_file = SndfileHandle(argv[2], SFM_WRITE, output_format,
                                            output_channels, output_sampling_rate);
  output_file.write(&(output[0]), num_samples);

  return 0;
}
MXFunction vec(const FX& a_) {
  FX a = a_;

  // Return null if the input is null
  if (a.isNull()) return MXFunction();

  // Get the MX inputs, only used for their shape
  const std::vector<MX>& symbolicInputMX = a.symbolicInput();
  // Vector-valued MX that have the shape of vec(symbolicInputMX)
  std::vector<MX> symbolicInputMX_vec(a.getNumInputs());
  // Matrix-shaped versions of the vectorized inputs
  std::vector<MX> symbolicInputMX_vec_reshape(a.getNumInputs());

  // Apply the vec-transformation to the inputs
  for (int i=0; i<symbolicInputMX.size(); ++i) {
    std::stringstream s;
    s << "X_flat_" << i;
    symbolicInputMX_vec[i] = MX(s.str(), vec(symbolicInputMX[i].sparsity()));
    symbolicInputMX_vec_reshape[i] = trans(reshape(symbolicInputMX_vec[i],
                                           trans(symbolicInputMX[i].sparsity())));
  }

  // Call the original function with the vecced inputs
  std::vector<MX> symbolicOutputMX = a.call(symbolicInputMX_vec_reshape);

  // Apply the vec-transformation to the outputs
  for (int i=0; i<symbolicOutputMX.size(); ++i)
    symbolicOutputMX[i] = vec(symbolicOutputMX[i]);

  // Make a new function with the vecced inputs/outputs
  MXFunction ret(symbolicInputMX_vec, symbolicOutputMX);

  // Initialize it if a was initialized
  if (a.isInit()) ret.init();
  return ret;
}
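// --- Hypothetical usage sketch (not from the original source) ---
// Vectorizes a function with a matrix input so it can be used where a solver
// expects a column-vector argument (cf. the 'ignore_check_vec' discussion in
// NLPSolverInternal::init below). The function built here is illustrative.
void vec_demo() {
  MX X = msym("X", 2, 2);                        // 2x2 matrix input
  MXFunction F(X, inner_prod(vec(X), vec(X)));   // illustrative scalar output
  F.init();
  MXFunction F_vec = vec(F);                     // input is now a 4x1 vector
}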
void FeatureLibrary::registerFX(FX fx) {
  string fxString = fx->getTypeString();
  // Make lower case
  std::transform(fxString.begin(), fxString.end(), fxString.begin(), ::tolower);
  _fxMap[fxString] = fx;
} // end registerFX
void EvaluationMX::create(const FX& fcn, const std::vector<MX>& arg,
    std::vector<MX>& res,
    const std::vector<std::vector<MX> >& fseed, std::vector<std::vector<MX> >& fsens,
    const std::vector<std::vector<MX> >& aseed, std::vector<std::vector<MX> >& asens,
    bool output_given) {

  // Number of inputs and outputs
  int num_in = fcn.getNumInputs();
  int num_out = fcn.getNumOutputs();

  // Number of directional derivatives
  int nfdir = fseed.size();
  int nadir = aseed.size();

  // Create the evaluation node
  MX ev;
  if (nfdir>0 || nadir>0) {
    // Create derivative function
    Derivative dfcn(fcn, nfdir, nadir);
    stringstream ss;
    ss << "der_" << fcn.getOption("name") << "_" << nfdir << "_" << nadir;
    dfcn.setOption("verbose", fcn.getOption("verbose"));
    dfcn.setOption("name", ss.str());
    dfcn.init();

    // All inputs
    vector<MX> darg;
    darg.reserve(num_in*(1+nfdir) + num_out*nadir);
    darg.insert(darg.end(), arg.begin(), arg.end());

    // Forward seeds
    for (int dir=0; dir<nfdir; ++dir) {
      darg.insert(darg.end(), fseed[dir].begin(), fseed[dir].end());
    }

    // Adjoint seeds
    for (int dir=0; dir<nadir; ++dir) {
      darg.insert(darg.end(), aseed[dir].begin(), aseed[dir].end());
    }

    ev.assignNode(new EvaluationMX(dfcn, darg));
  } else {
    ev.assignNode(new EvaluationMX(fcn, arg));
  }

  // Output index
  int ind = 0;

  // Create the output nodes corresponding to the nondifferentiated function
  res.resize(num_out);
  for (int i = 0; i < num_out; ++i, ++ind) {
    if (!output_given) {
      if (!fcn.output(i).empty()) {
        res[i].assignNode(new OutputNode(ev, ind));
      } else {
        res[i] = MX();
      }
    }
  }

  // Forward sensitivities
  fsens.resize(nfdir);
  for (int dir = 0; dir < nfdir; ++dir) {
    fsens[dir].resize(num_out);
    for (int i = 0; i < num_out; ++i, ++ind) {
      if (!fcn.output(i).empty()) {
        fsens[dir][i].assignNode(new OutputNode(ev, ind));
      } else {
        fsens[dir][i] = MX();
      }
    }
  }

  // Adjoint sensitivities
  asens.resize(nadir);
  for (int dir = 0; dir < nadir; ++dir) {
    asens[dir].resize(num_in);
    for (int i = 0; i < num_in; ++i, ++ind) {
      if (!fcn.input(i).empty()) {
        asens[dir][i].assignNode(new OutputNode(ev, ind));
      } else {
        asens[dir][i] = MX();
      }
    }
  }
}
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");

  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");

  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2,
      "Wrong number of input arguments to F for parametric NLP. Must be 2, but got "
      << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1,
      "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got "
      << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? "
      "Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2,
    "Wrong number of input arguments to F. Must be 1 or 2");
  if (F_.getNumInputs()==2) parametric_ = true;

  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
    "To avoid confusion, the input argument to F must be a vector. You supplied "
    << F_.input().dimString() << endl
    << " We suggest you make the following changes:" << endl
    << "   - F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl
    << "     or F -> F = vec(F)" << endl
    << "   - F is an MXFunction:  MXFunction([X],[rhs]) -> " << endl
    << "       X_vec = MX(\"X\",vec(X.sparsity()))" << endl
    << "       F_vec = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]])" << endl
    << "     or F -> F = vec(F)" << endl
    << " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl
  );

  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_ || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(),
    "Input argument of F must be dense. You supplied " << F_.input().dimString());

  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2,
        "Wrong number of input arguments to G for parametric NLP. Must be 2, but got "
        << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1,
        "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got "
        << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? "
        "Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(),
      "F and G input dimension must match. F " << F_.input().dimString()
      << ". G " << G_.input().dimString());
  }

  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");

    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }

      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }

  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");

    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }

      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }

  // Find out if we are to generate the Hessian of the Lagrangian
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,
      "Automatic generation of Gauss-Newton Hessian not yet supported");
    log("generating hessian");

    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();

      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();

      // Lagrange multipliers
      MX lam("lam",0);

      // Objective function scaling
      MX sigma("sigma");

      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);

      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");

    } else { // G_.isNull()

      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);

      // Efficient if both functions are SXFunction
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expressions for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();

        // Numeric hessian
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");

        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");

        // Substitute symbolic variables in f if they differ from the input variables of g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }

        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");

        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4 : 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);
        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();

        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");

      } else { // !F_sx.isNull() && !G_sx.isNull()

        // Check if the functions are MXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);

        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");

          // Expressions for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();

          // Lagrange multipliers
          MX lam("lam",g.size1());

          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4 : 3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
          lfcn.init();
          log("MX Lagrangian function generated");

          /* cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl; */

          bool adjoint_mode = true;
          if(adjoint_mode){
            // Gradient of the Lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");

            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
            // cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");

            // Uni-directional coloring (note: the Hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");

            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {
            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
          }
          log("MX Lagrangian Hessian function generated");

        } else {
          casadi_assert_message(0,
            "Automatic calculation of exact Hessian currently only for F and G "
            "both SXFunction or MXFunction");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()

  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if it does not already exist
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();

    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }

  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2,
        "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got "
        << H_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1,
        "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got "
        << H_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? "
        "Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2,
        "Wrong number of input arguments to J for parametric NLP. Must be 2, but got "
        << J_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1,
        "Wrong number of input arguments to J for non-parametric NLP. Must be 1, but got "
        << J_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? "
        "Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();

    if (!G_.isNull())
      casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),
        "Parametric NLP has inconsistent parameter dimensions. F has got "
        << sp_p.dimString() << " as dimensions, while G has got "
        << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull())
      casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),
        "Parametric NLP has inconsistent parameter dimensions. F has got "
        << sp_p.dimString() << " as dimensions, while H has got "
        << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull())
      casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),
        "Parametric NLP has inconsistent parameter dimensions. F has got "
        << sp_p.dimString() << " as dimensions, while J has got "
        << J_->input(J_->getNumInputs()-1).dimString());
  }

  // Infinity
  double inf = numeric_limits<double>::infinity();

  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);

  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)    = DMatrix(n_,1,0);
  output(NLP_COST)     = DMatrix(1,1,0);
  output(NLP_LAMBDA_X) = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G) = DMatrix(m_,1,0);
  output(NLP_G)        = DMatrix(m_,1,0);

  if (hasSetOption("iteration_callback")) {
    callback_ = getOption("iteration_callback");
    if (!callback_.isNull()) {
      if (!callback_.isInit()) callback_.init();
      casadi_assert_message(callback_.getNumOutputs()==1,
        "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
      casadi_assert_message(callback_.output(0).size()==1,
        "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
      casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT,
        "Callback function should have the output scheme of NLPSolver as input scheme, i.e. "
        << NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided.");
      for (int i=0; i<NLP_NUM_OUT; ++i) {
        casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
          "Callback function should have the output scheme of NLPSolver as input scheme. "
          << "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i)
          << " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be "
          << callback_.input(i).dimString() << " instead of expected "
          << output(i).dimString() << ".");
        callback_.input(i).setAll(0);
      }
    }
  }

  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
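// --- Hypothetical usage sketch (not from the original source) ---
// A minimal NLP exercising the checks in NLPSolverInternal::init() above.
// IpoptSolver is one concrete NLPSolver subclass; the problem data are made up.
void nlp_solver_demo() {
  SXMatrix x = ssym("x",2);
  SXFunction F(x, x[0]*x[0] + x[1]*x[1]);     // scalar, dense objective
  SXFunction G(x, x[0] + x[1]);               // one constraint
  IpoptSolver solver(F,G);
  solver.setOption("generate_hessian",true);  // exercises the Hessian branch above
  solver.init();
  solver.input(NLP_LBG).setAll(1);            // enforce x0 + x1 == 1
  solver.input(NLP_UBG).setAll(1);
  solver.solve();
  cout << solver.output(NLP_X_OPT) << endl;
}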
FX FX::create(FXInternal* node){
  FX ret;
  ret.assignNode(node);
  return ret;
}
void process_frame(simpl::Frame* frame, std::vector<double>& env) {
  // Gradually change envelope interpolation until the applied
  // envelope is the linear ramp
  _interp += _interp_step;
  _fx->env_interp(_interp);
}
void CollocationIntegratorInternal::init(){
  // Call the base class init
  IntegratorInternal::init();

  // Legendre collocation points
  double legendre_points[][6] = {
    {0},
    {0,0.500000},
    {0,0.211325,0.788675},
    {0,0.112702,0.500000,0.887298},
    {0,0.069432,0.330009,0.669991,0.930568},
    {0,0.046910,0.230765,0.500000,0.769235,0.953090}};

  // Radau collocation points
  double radau_points[][6] = {
    {0},
    {0,1.000000},
    {0,0.333333,1.000000},
    {0,0.155051,0.644949,1.000000},
    {0,0.088588,0.409467,0.787659,1.000000},
    {0,0.057104,0.276843,0.583590,0.860240,1.000000}};

  // Read options
  bool use_radau = false;
  if(getOption("collocation_scheme")=="radau"){
    use_radau = true;
  } else if(getOption("collocation_scheme")=="legendre"){
    use_radau = false;
  }

  // Hotstart?
  hotstart_ = getOption("hotstart");

  // Number of finite elements
  int nk = getOption("number_of_finite_elements");

  // Interpolation order
  int deg = getOption("interpolation_order");

  // Assume explicit ODE
  bool explicit_ode = f_.input(DAE_XDOT).size()==0;

  // All collocation time points
  double* tau_root = use_radau ? radau_points[deg] : legendre_points[deg];

  // Size of the finite elements
  double h = (tf_-t0_)/nk;

  // MX version of the same
  MX h_mx = h;

  // Coefficients of the collocation equation
  vector<vector<MX> > C(deg+1,vector<MX>(deg+1));

  // Coefficients of the continuity equation
  vector<MX> D(deg+1);

  // Collocation point
  SXMatrix tau = ssym("tau");

  // For all collocation points
  for(int j=0; j<deg+1; ++j){
    // Construct Lagrange polynomials to get the polynomial basis at the collocation point
    SXMatrix L = 1;
    for(int j2=0; j2<deg+1; ++j2){
      if(j2 != j){
        L *= (tau-tau_root[j2])/(tau_root[j]-tau_root[j2]);
      }
    }
    SXFunction lfcn(tau,L);
    lfcn.init();

    // Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    lfcn.setInput(1.0);
    lfcn.evaluate();
    D[j] = lfcn.output();

    // Evaluate the time derivative of the polynomial at all collocation points
    // to get the coefficients of the collocation equation
    for(int j2=0; j2<deg+1; ++j2){
      lfcn.setInput(tau_root[j2]);
      lfcn.setFwdSeed(1.0);
      lfcn.evaluate(1,0);
      C[j][j2] = lfcn.fwdSens();
    }
  }

  // Initial state
  MX X0("X0",nx_);

  // Parameters
  MX P("P",np_);

  // Backward state
  MX RX0("RX0",nrx_);

  // Backward parameters
  MX RP("RP",nrp_);

  // Collocated differential states and algebraic variables
  int nX = (nk*(deg+1)+1)*(nx_+nrx_);
  int nZ = nk*deg*(nz_+nrz_);

  // Unknowns
  MX V("V",nX+nZ);
  int offset = 0;

  // Get collocated states, algebraic variables and times
  vector<vector<MX> > X(nk+1);
  vector<vector<MX> > RX(nk+1);
  vector<vector<MX> > Z(nk);
  vector<vector<MX> > RZ(nk);
  coll_time_.resize(nk+1);
  for(int k=0; k<nk+1; ++k){
    // Number of time points
    int nj = k==nk ? 1 : deg+1;

    // Allocate differential state expressions at the time points
    X[k].resize(nj);
    RX[k].resize(nj);
    coll_time_[k].resize(nj);

    // Allocate algebraic variable expressions at the collocation points
    if(k!=nk){
      Z[k].resize(nj-1);
      RZ[k].resize(nj-1);
    }

    // For all time points
    for(int j=0; j<nj; ++j){
      // Get expressions for the differential state
      X[k][j] = V[range(offset,offset+nx_)];
      offset += nx_;
      RX[k][j] = V[range(offset,offset+nrx_)];
      offset += nrx_;

      // Get the local time
      coll_time_[k][j] = h*(k + tau_root[j]);

      // Get expressions for the algebraic variables
      if(j>0){
        Z[k][j-1] = V[range(offset,offset+nz_)];
        offset += nz_;
        RZ[k][j-1] = V[range(offset,offset+nrz_)];
        offset += nrz_;
      }
    }
  }

  // Check offset for consistency
  casadi_assert(offset==V.size());

  // Constraints
  vector<MX> g;
  g.reserve(2*(nk+1));

  // Quadrature expressions
  MX QF = MX::zeros(nq_);
  MX RQF = MX::zeros(nrq_);

  // Counter
  int jk = 0;

  // Add initial condition
  g.push_back(X[0][0]-X0);

  // For all finite elements
  for(int k=0; k<nk; ++k, ++jk){

    // For all collocation points
    for(int j=1; j<deg+1; ++j, ++jk){
      // Get the time
      MX tkj = coll_time_[k][j];

      // Get an expression for the state derivative at the collocation point
      MX xp_jk = 0;
      for(int j2=0; j2<deg+1; ++j2){
        xp_jk += C[j2][j]*X[k][j2];
      }

      // Add collocation equations to the NLP
      vector<MX> f_in(DAE_NUM_IN);
      f_in[DAE_T] = tkj;
      f_in[DAE_P] = P;
      f_in[DAE_X] = X[k][j];
      f_in[DAE_Z] = Z[k][j-1];
      vector<MX> f_out;
      if(explicit_ode){
        // Assume equation of the form ydot = f(t,y,p)
        f_out = f_.call(f_in);
        g.push_back(h_mx*f_out[DAE_ODE] - xp_jk);
      } else {
        // Assume equation of the form 0 = f(t,y,ydot,p)
        f_in[DAE_XDOT] = xp_jk/h_mx;
        f_out = f_.call(f_in);
        g.push_back(f_out[DAE_ODE]);
      }

      // Add the algebraic conditions
      if(nz_>0){
        g.push_back(f_out[DAE_ALG]);
      }

      // Add the quadrature
      if(nq_>0){
        QF += D[j]*h_mx*f_out[DAE_QUAD];
      }

      // Now for the backward problem
      if(nrx_>0){
        // Get an expression for the state derivative at the collocation point
        MX rxp_jk = 0;
        for(int j2=0; j2<deg+1; ++j2){
          rxp_jk += C[j2][j]*RX[k][j2];
        }

        // Add collocation equations to the NLP
        vector<MX> g_in(RDAE_NUM_IN);
        g_in[RDAE_T] = tkj;
        g_in[RDAE_X] = X[k][j];
        g_in[RDAE_Z] = Z[k][j-1];
        g_in[RDAE_P] = P;
        g_in[RDAE_RP] = RP;
        g_in[RDAE_RX] = RX[k][j];
        g_in[RDAE_RZ] = RZ[k][j-1];
        vector<MX> g_out;
        if(explicit_ode){
          // Assume equation of the form xdot = f(t,x,p)
          g_out = g_.call(g_in);
          g.push_back(h_mx*g_out[RDAE_ODE] - rxp_jk);
        } else {
          // Assume equation of the form 0 = f(t,x,xdot,p)
          g_in[RDAE_XDOT] = xp_jk/h_mx;
          g_in[RDAE_RXDOT] = rxp_jk/h_mx;
          g_out = g_.call(g_in);
          g.push_back(g_out[RDAE_ODE]);
        }

        // Add the algebraic conditions
        if(nrz_>0){
          g.push_back(g_out[RDAE_ALG]);
        }

        // Add the backward quadrature
        if(nrq_>0){
          RQF += D[j]*h_mx*g_out[RDAE_QUAD];
        }
      }
    }

    // Get an expression for the state at the end of the finite element
    MX xf_k = 0;
    for(int j=0; j<deg+1; ++j){
      xf_k += D[j]*X[k][j];
    }

    // Add continuity equation to the NLP
    g.push_back(X[k+1][0] - xf_k);

    if(nrx_>0){
      // Get an expression for the backward state at the end of the finite element
      MX rxf_k = 0;
      for(int j=0; j<deg+1; ++j){
        rxf_k += D[j]*RX[k][j];
      }

      // Add continuity equation to the NLP
      g.push_back(RX[k+1][0] - rxf_k);
    }
  }

  // Add initial condition for the backward integration
  if(nrx_>0){
    g.push_back(RX[nk][0]-RX0);
  }

  // Constraint expression
  MX gv = vertcat(g);

  // Make sure that the dimension is consistent with the number of unknowns
  casadi_assert_message(gv.size()==V.size(),
    "Implicit function unknowns and equations do not match");

  // Nonlinear constraint function input
  vector<MX> gfcn_in(1+INTEGRATOR_NUM_IN);
  gfcn_in[0] = V;
  gfcn_in[1+INTEGRATOR_X0] = X0;
  gfcn_in[1+INTEGRATOR_P] = P;
  gfcn_in[1+INTEGRATOR_RX0] = RX0;
  gfcn_in[1+INTEGRATOR_RP] = RP;

  vector<MX> gfcn_out(1+INTEGRATOR_NUM_OUT);
  gfcn_out[0] = gv;
  gfcn_out[1+INTEGRATOR_XF] = X[nk][0];
  gfcn_out[1+INTEGRATOR_QF] = QF;
  gfcn_out[1+INTEGRATOR_RXF] = RX[0][0];
  gfcn_out[1+INTEGRATOR_RQF] = RQF;

  // Nonlinear constraint function
  FX gfcn = MXFunction(gfcn_in,gfcn_out);

  // Expand f?
  bool expand_f = getOption("expand_f");
  if(expand_f){
    gfcn.init();
    gfcn = SXFunction(shared_cast<MXFunction>(gfcn));
  }

  // Get the implicit function creator
  implicitFunctionCreator implicit_function_creator = getOption("implicit_solver");

  // Allocate an implicit function solver
  implicit_solver_ = implicit_function_creator(gfcn);

  // Pass options
  if(hasSetOption("implicit_solver_options")){
    const Dictionary& implicit_solver_options = getOption("implicit_solver_options");
    implicit_solver_.setOption(implicit_solver_options);
  }

  // Initialize the solver
  implicit_solver_.init();

  if(hasSetOption("startup_integrator")){
    // Get the startup integrator creator
    integratorCreator startup_integrator_creator = getOption("startup_integrator");

    // Allocate a startup integrator
    startup_integrator_ = startup_integrator_creator(f_,g_);

    // Pass options
    startup_integrator_.setOption("number_of_fwd_dir",0); // not needed
    startup_integrator_.setOption("number_of_adj_dir",0); // not needed
    startup_integrator_.setOption("t0",coll_time_.front().front());
    startup_integrator_.setOption("tf",coll_time_.back().back());
    if(hasSetOption("startup_integrator_options")){
      const Dictionary& startup_integrator_options = getOption("startup_integrator_options");
      startup_integrator_.setOption(startup_integrator_options);
    }

    // Initialize the startup integrator
    startup_integrator_.init();
  }

  // Mark the system not yet integrated
  integrated_once_ = false;
}
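// --- Hypothetical usage sketch (not from the original source) ---
// Configures the options read in CollocationIntegratorInternal::init() above.
// The use of KinsolSolver::creator as the implicit solver is an assumption
// (by analogy with CSparse::creator below); any implicitFunctionCreator would do.
void collocation_demo(FX dae) {   // dae: a function with the DAE input scheme
  CollocationIntegrator integrator(dae);
  integrator.setOption("collocation_scheme","radau");
  integrator.setOption("interpolation_order",3);
  integrator.setOption("number_of_finite_elements",20);
  integrator.setOption("implicit_solver",KinsolSolver::creator);
  integrator.setOption("t0",0);
  integrator.setOption("tf",10);
  integrator.init();
}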
int main(){
  // Test both SX and MX
  for(int test=0; test<2; ++test){

    // Create a simple function
    FX f;
    if(test==0){
      cout << "SXFunction:" << endl;
      SXMatrix x = ssym("x",3);
      SXMatrix z = x[0]*x[0]+x[2] + 3;
      f = SXFunction(x,z);
    } else {
      cout << "MXFunction:" << endl;
      MX x = msym("x",3);
      MX z = x[0]*x[0]+x[2] + 3;
      f = MXFunction(x,z);
    }
    f.init();

    // Get arrays for the inputs and outputs, reinterpreting the vector of doubles
    // as an array of unsigned integers
    bvec_t* f_in = get_bvec_t(f.input().data());
    bvec_t* f_out = get_bvec_t(f.output().data());

    // Propagate from input to output (forward mode)
    cout << "forward mode" << endl;
    int fwd = true;

    // Make sure that the class is able to support the dependency propagation
    casadi_assert(f.spCanEvaluate(fwd));

    // Pass seeds
    f_in[0] = bvec_t(1) << 0;                       // seed in direction 0
    f_in[1] = bvec_t(1) << 2;                       // seed in direction 2
    f_in[2] = (bvec_t(1) << 4) | (bvec_t(1) << 63); // seed in directions 4 and 63

    // Reset sensitivities
    f_out[0] = 0;

    // Propagate dependencies
    f.spInit(fwd);
    f.spEvaluate(fwd);

    // Print the result
    printBinary(f_out[0]);

    // Propagate from output to input (adjoint/reverse/backward mode)
    cout << "backward mode" << endl;
    fwd = false;

    // Make sure that the class is able to support the dependency propagation
    casadi_assert(f.spCanEvaluate(fwd));

    // Pass seeds
    f_out[0] = (bvec_t(1) << 5) | (bvec_t(1) << 6); // seed in directions 5 and 6

    // Reset sensitivities
    f_in[0] = 0;
    f_in[1] = 0;
    f_in[2] = 0;

    // Propagate dependencies
    f.spInit(fwd);
    f.spEvaluate(fwd);

    // Print the result
    printBinary(f_in[0]);
    printBinary(f_in[1]);
    printBinary(f_in[2]);
  }
  return 0;
}
// Create an IDAS instance (fully implicit integrator)
Integrator create_Sundials(){
  // Time
  SX t("t");

  // Differential states
  SX s("s"), v("v"), m("m");
  vector<SX> x(3);
  x[0] = s;
  x[1] = v;
  x[2] = m;

  // State derivatives
  SX sdot("sdot"), vdot("vdot"), mdot("mdot");
  vector<SX> xdot(3);
  xdot[0] = sdot;
  xdot[1] = vdot;
  xdot[2] = mdot;

  // Control
  SX u("u");

  // Reference trajectory
  SX u_ref = 3-sin(t);

  // Square deviation from the state trajectory
  SX u_dev = u-u_ref;
  u_dev *= u_dev;

  // Differential equation (fully implicit form)
  vector<SX> res(3);
  res[0] = v - sdot;
  res[1] = (u-0.02*v*v)/m - vdot;
  res[2] = -0.01*u*u - mdot;

  // Input/output of the DAE residual function
  vector<SXMatrix> ffcn_in = daeIn<SXMatrix>("x",x, "p",u, "t",t, "xdot",xdot);
  vector<SXMatrix> ffcn_out = daeOut<SXMatrix>("ode",res, "quad",u_dev);

  // DAE residual function
  FX ffcn = SXFunction(ffcn_in,ffcn_out);

  // Overwrite ffcn with a plain C function (avoid this!)
  if(plain_c){
    // Use the DAE residual defined in a C function
    ffcn = CFunction(dae_res_c_wrapper);

    // Specify the number of inputs and outputs
    ffcn.setNumInputs(DAE_NUM_IN);
    ffcn.setNumOutputs(DAE_NUM_OUT);

    // Specify dimensions of inputs and outputs
    ffcn.input(DAE_T)     = DMatrix(1,1,0);
    ffcn.input(DAE_X)     = DMatrix(3,1,0);
    ffcn.input(DAE_XDOT)  = DMatrix(3,1,0);
    ffcn.input(DAE_P)     = DMatrix(1,1,0);
    ffcn.output(DAE_ODE)  = DMatrix(3,1,0);
    ffcn.output(DAE_QUAD) = DMatrix(1,1,0);
  }

  if(implicit_integrator){
    // Create an IDAS instance
    IdasIntegrator integrator(ffcn);

    // Set IDAS-specific options
    integrator.setOption("calc_ic",calc_ic);

    // Return the integrator
    return integrator;
  } else {
    // Create a CVodes instance
    CVodesIntegrator integrator(ffcn);

    // Return the integrator
    return integrator;
  }
}
int main(){
  // Time horizon
  double t0 = 0, tf = 10;

  // Bounds on the control
  double /*u_lb = -0.5, u_ub = 1.3,*/ u_init = 1;

  // Initial conditions
  vector<double> x0(3);
  x0[0] = 0;
  x0[1] = 0;
  x0[2] = 1;

  // Integrator
  Integrator integrator = create_Sundials();

  // Attach user-defined linear solver
  if(user_defined_solver){
    if(sparse_direct){
      integrator.setOption("linear_solver_creator",CSparse::creator);
      // integrator.setOption("linear_solver_creator",SuperLU::creator);
    } else {
      integrator.setOption("linear_solver_creator",LapackLUDense::creator);
      integrator.setOption("linear_solver","user_defined");
    }
    // integrator.setOption("linear_solver","user_defined"); // FIXME: bug for second order
  }

  // Set common integrator options
  integrator.setOption("fsens_err_con",true);
  integrator.setOption("quad_err_con",true);
  integrator.setOption("abstol",1e-12);
  integrator.setOption("reltol",1e-12);
  integrator.setOption("fsens_abstol",1e-6);
  integrator.setOption("fsens_reltol",1e-6);
  integrator.setOption("asens_abstol",1e-6);
  integrator.setOption("asens_reltol",1e-6);
  integrator.setOption("exact_jacobian",exact_jacobian);
  integrator.setOption("finite_difference_fsens",finite_difference_fsens);
  integrator.setOption("max_num_steps",100000);
  // integrator.setOption("max_multistep_order",4);
  integrator.setOption("t0",t0);
  integrator.setOption("tf",tf);

  // Initialize the integrator
  integrator.init();

  // Set parameters
  integrator.setInput(u_init,INTEGRATOR_P);

  // Set initial state
  integrator.setInput(x0,INTEGRATOR_X0);

  // Integrate
  integrator.evaluate();

  // Save the result
  Matrix<double> res0_xf = integrator.output(INTEGRATOR_XF);
  Matrix<double> res0_qf = integrator.output(INTEGRATOR_QF);

  // Perturb in some direction
  if(perturb_u){
    double u_pert = u_init + 0.01;
    integrator.setInput(u_pert,INTEGRATOR_P);
  } else {
    vector<double> x_pert = x0;
    x_pert[1] += 0.01;
    integrator.setInput(x_pert,INTEGRATOR_X0);
  }

  // Integrate again
  integrator.evaluate();

  // Print statistics
  integrator.printStats();

  // Calculate finite difference approximation
  Matrix<double> fd_xf = (integrator.output(INTEGRATOR_XF) - res0_xf)/0.01;
  Matrix<double> fd_qf = (integrator.output(INTEGRATOR_QF) - res0_qf)/0.01;

  cout << "unperturbed " << res0_xf << "; " << res0_qf << endl;
  cout << "perturbed " << integrator.output(INTEGRATOR_XF) << "; "
       << integrator.output(INTEGRATOR_QF) << endl;
  cout << "finite difference approximation " << fd_xf << "; " << fd_qf << endl;

  if(perturb_u){
    integrator.setFwdSeed(1.0,INTEGRATOR_P);
  } else {
    vector<double> x0_seed(x0.size(),0);
    x0_seed[1] = 1;
    integrator.setFwdSeed(x0_seed,INTEGRATOR_X0);
  }

  // Reset parameters
  integrator.setInput(u_init,INTEGRATOR_P);

  // Reset initial state
  integrator.setInput(x0,INTEGRATOR_X0);

  if(with_asens){
    // Backward seeds
    vector<double>& bseed = integrator.adjSeed(INTEGRATOR_XF).data();
    fill(bseed.begin(),bseed.end(),0);
    bseed[1] = 1;

    // Evaluate with forward and adjoint sensitivities
    integrator.evaluate(1,1);
  } else {
    // Evaluate with only forward sensitivities
    integrator.evaluate(1,0);
  }

  Matrix<double> fsens_xf = integrator.fwdSens(INTEGRATOR_XF);
  Matrix<double> fsens_qf = integrator.fwdSens(INTEGRATOR_QF);
  cout << "forward sensitivities " << fsens_xf << "; " << fsens_qf << endl;

  if(with_asens){
    cout << "adjoint sensitivities ";
    cout << integrator.adjSens(INTEGRATOR_X0) << "; ";
    cout << integrator.adjSens(INTEGRATOR_P) << "; ";
    cout << endl;
  }

  if(second_order){
    // Perturb the forward seeds
    if(perturb_u){
      double u_seed = 1.001;
      integrator.setFwdSeed(u_seed,INTEGRATOR_P);
    } else {
      vector<double> x0_seed(x0.size(),0);
      x0_seed[1] = 1.001;
      integrator.setFwdSeed(x0_seed,INTEGRATOR_X0);
    }

    // Evaluate again with forward sensitivities
    integrator.evaluate(1,0);

    vector<double> fsens_pret_xf = integrator.fwdSens(INTEGRATOR_XF).data();
    vector<double> fsens_pret_qf = integrator.fwdSens(INTEGRATOR_QF).data();
    cout << "forward sensitivities perturbed " << fsens_pret_xf << "; " << fsens_pret_qf << endl;

    vector<double> fd2_xf(fsens_xf.size());
    vector<double> fd2_qf(fsens_qf.size());
    for(int i=0; i<fd2_xf.size(); ++i) fd2_xf[i] = (fsens_pret_xf.at(i)-fsens_xf.at(i))/0.001;
    for(int i=0; i<fd2_qf.size(); ++i) fd2_qf[i] = (fsens_pret_qf.at(i)-fsens_qf.at(i))/0.001;
    cout << "finite differences, 2nd order " << fd2_xf << "; " << fd2_qf << endl;

    // Generate the Jacobian by creating a new integrator for the sensitivity
    // equations by source transformation
    FX intjac = integrator.jacobian(INTEGRATOR_P,INTEGRATOR_XF);

    // Set options
    intjac.setOption("number_of_fwd_dir",0);
    intjac.setOption("number_of_adj_dir",1);

    // Initialize the Jacobian function
    intjac.init();

    // Set inputs
    intjac.setInput(u_init,INTEGRATOR_P);
    intjac.setInput(x0,INTEGRATOR_X0);

    // Set adjoint seed
    vector<double> jacseed(3*1,0);
    jacseed[0] = 1;
    intjac.setAdjSeed(jacseed);

    // Evaluate the Jacobian
    intjac.evaluate(0,0);
    cout << "jacobian " << intjac.output(0) << endl;

    // Get the results
    /* cout << "unperturbed via jacobian " << intjac.output(1+INTEGRATOR_XF) << endl; */
    cout << "second order (fwd-over-adj) ";
    cout << intjac.adjSens(INTEGRATOR_X0) << ", ";
    cout << intjac.adjSens(INTEGRATOR_P) << endl;

    // Save the unperturbed value
    Matrix<double> unpret = intjac.output();

    // Perturb the parameter
    intjac.setInput(u_init+0.01,INTEGRATOR_P);
    intjac.evaluate();
    Matrix<double> pret = intjac.output();

    cout << "unperturbed fwd sens " << unpret << endl;
    cout << "perturbed fwd sens " << pret << endl;
    cout << "finite diff. (augmented dae) " << (pret-unpret)/0.01 << endl;
  }
  return 0;
}