CRSSparsity sp_rowcol(std::vector<int> row, std::vector<int> col, int nrow, int ncol) {
  std::vector<int> rowind(nrow+1);
  std::vector<int> col_new(row.size()*col.size());

  // Resulting col: the entries of col are repeated row.size() times
  for (int i=0; i<row.size(); i++)
    std::copy(col.begin(), col.end(), col_new.begin()+col.size()*i);

  // Resulting rowind: first entry is always zero
  int cnt = 0;
  int z = 0;
  rowind[0] = 0;
  int k = 0;
  try {
    for (k=0; k<row.size(); k++) {
      // Resulting rowind: fill up rowind entries with copies
      while (z<row[k])
        rowind.at(++z) = cnt;

      // Resulting rowind: add col.size() at each row[k]
      rowind.at(row[k]+1) = (cnt += col.size());
    }
    while (z<nrow)
      rowind.at(++z) = cnt;
  } catch (out_of_range& oor) {
    casadi_error(
      "sp_rowcol: out-of-range error." << endl
      << "The " << k << "th entry of row (" << row[k]
      << ") was greater than or equal to the specified total number of rows ("
      << nrow << ")");
  }
  return CRSSparsity(nrow, ncol, col_new, rowind);
}
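// Illustrative sketch (not part of the original source): sp_rowcol places a
// dense block of nonzeros on the Cartesian product of the given rows and
// columns. The helper below is hypothetical and only exercises the function
// defined above; it assumes CRSSparsity::size() returns the number of
// structural nonzeros.
static void example_sp_rowcol() {
  std::vector<int> row(2); row[0] = 0; row[1] = 2;
  std::vector<int> col(2); col[0] = 1; col[1] = 3;
  CRSSparsity sp = sp_rowcol(row, col, 4, 5);
  // 4-by-5 pattern with nonzeros at (0,1), (0,3), (2,1), (2,3):
  //   . x . x .
  //   . . . . .
  //   . x . x .
  //   . . . . .
  // Internally: col = {1,3,1,3}, rowind = {0,2,2,4,4}
  casadi_assert(sp.size() == 4);
}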
CRSSparsity CSparseCholeskyInternal::getFactorizationSparsity() const {
  casadi_assert(S_);
  int n = AT_.n;
  int nzmax = S_->cp[n];
  std::vector<int> col(n+1);
  std::copy(S_->cp, S_->cp+n+1, col.begin());
  std::vector<int> rowind(nzmax);
  int* Li = &rowind.front();
  int* Lp = &col.front();
  const cs* C;
  C = S_->pinv ? cs_symperm(&AT_, S_->pinv, 1) : &AT_;
  std::vector<int> temp(2*n);
  int* c = &temp.front();
  int* s = c+n;
  for (int k=0; k<n; k++) c[k] = S_->cp[k];
  for (int k=0; k<n; k++) { // compute L(k,:) for L*L' = C
    int top = cs_ereach(C, k, S_->parent, s, c);
    for ( ; top<n; top++) { // solve L(0:k-1,0:k-1) * x = C(:,k)
      int i = s[top]; // s[top..n-1] is the pattern of L(k,:)
      int p = c[i]++;
      Li[p] = k;      // store L(k,i) in column i
    }
    int p = c[k]++;
    Li[p] = k;        // store the diagonal entry L(k,k)
  }
  Lp[n] = S_->cp[n];
  // CSparse works with column-compressed storage, so (rowind, col) holds the
  // pattern of L in CCS form; transposing recovers the CRS pattern.
  return trans(CRSSparsity(n, n, rowind, col));
}
void ImplicitFunctionInternal::init() {
  // Initialize the residual function
  if (!f_.isInit()) f_.init();

  // Allocate inputs
  setNumInputs(f_.getNumInputs()-1);
  for (int i=0; i<getNumInputs(); ++i) {
    input(i) = f_.input(i+1);
  }

  // Allocate outputs
  setNumOutputs(f_.getNumOutputs());
  output(0) = f_.input(0);
  for (int i=1; i<getNumOutputs(); ++i) {
    output(i) = f_.output(i);
  }

  // Call the base class initializer
  FXInternal::init();

  // Number of equations
  N_ = output().size();

  // Generate Jacobian if not provided
  if (J_.isNull()) J_ = f_.jacobian(0,0);
  J_.init();

  casadi_assert_message(J_.output().size1()==J_.output().size2(),
    "ImplicitFunctionInternal::init: the Jacobian must be square but got "
    << J_.output().dimString());

  casadi_assert_message(!isSingular(J_.output().sparsity()),
    "ImplicitFunctionInternal::init: singularity - the Jacobian is structurally rank-deficient. "
    "sprank(J)=" << sprank(J_.output()) << " (instead of " << J_.output().size1() << ")");

  // Get the linear solver creator function
  if (linsol_.isNull() && hasSetOption("linear_solver")) {
    linearSolverCreator linear_solver_creator = getOption("linear_solver");

    // Allocate a linear solver
    linsol_ = linear_solver_creator(CRSSparsity());

    // Pass options
    if (hasSetOption("linear_solver_options")) {
      const Dictionary& linear_solver_options = getOption("linear_solver_options");
      linsol_.setOption(linear_solver_options);
    }
  }

  // Initialize the linear solver, if provided
  if (!linsol_.isNull()) {
    linsol_.setSparsity(J_.output().sparsity());
    linsol_.init();
  }

  // Allocate memory for directional derivatives
  ImplicitFunctionInternal::updateNumSens(false);
}
EvaluationMX::EvaluationMX(const FX& fcn, const std::vector<MX>& arg) : fcn_(fcn) {
  // Number of inputs of the function
  int num_in = fcn.getNumInputs();

  // All dependencies of the function
  vector<MX> d = arg;
  d.resize(num_in);

  setDependencies(d);
  setSparsity(CRSSparsity(1, 1, true));
}
DMatrix CSparseCholeskyInternal::getFactorization() const {
  casadi_assert(L_);
  cs* L = L_->L;
  int nz = L->nzmax;
  int m = L->m; // number of rows
  int n = L->n; // number of columns
  std::vector<int> rowind(m+1);
  std::copy(L->p, L->p+m+1, rowind.begin());
  std::vector<int> col(nz);
  std::copy(L->i, L->i+nz, col.begin());
  std::vector<double> data(nz);
  std::copy(L->x, L->x+nz, data.begin());
  // CSparse stores L column-compressed; reading it as CRS yields L', so
  // transpose to recover L.
  return trans(DMatrix(CRSSparsity(m, n, col, rowind), data));
}
CRSSparsity sp_tril(int n) {
  if (n<0) throw CasadiException("sp_tril expects a non-negative integer as argument");
  int c = 0;
  int t = 0;
  std::vector<int> col((n*(n+1))/2, 0);
  for (int i=0; i<(n*(n+1))/2; i++) {
    col[i] = t++;
    if (t>c) {
      t = 0;
      c++;
    }
  }
  std::vector<int> rowind(n+1, 0);
  c = 0;
  for (int i=1; i<n+1; i++)
    rowind[i] = rowind[i-1]+1+(c++);
  return CRSSparsity(n, n, col, rowind);
}
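// Illustrative sketch (not part of the original source): sp_tril builds the
// dense lower-triangular pattern. The helper is hypothetical and assumes
// CRSSparsity::size() returns the number of structural nonzeros.
static void example_sp_tril() {
  CRSSparsity sp = sp_tril(3);
  // Pattern:  x . .
  //           x x .
  //           x x x
  // Internally: col = {0, 0,1, 0,1,2}, rowind = {0,1,3,6}
  casadi_assert(sp.size() == 6); // n*(n+1)/2 nonzeros
}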
CRSSparsity ParallelizerInternal::getJacSparsity(int iind, int oind, bool symmetric) {
  // Number of tasks
  int ntask = inind_.size()-1;

  // Find out which task corresponds to iind
  int task;
  for (task=0; task<ntask && iind>=inind_[task+1]; ++task);

  // Check if the output index is also in this task
  if (oind>=outind_[task] && oind<outind_[task+1]) {
    // Get the input and output indices local to the task
    int iind_f = iind-inind_[task];
    int oind_f = oind-outind_[task];

    // Get the local sparsity pattern
    return funcs_.at(task).jacSparsity(iind_f, oind_f);
  } else {
    // All-zero Jacobian
    return CRSSparsity();
  }
}
CRSSparsity sp_band(int n, int p) {
  casadi_assert_message(n>=0, "sp_band expects a non-negative integer as argument");
  casadi_assert_message((p<0? -p : p)<n, "sp_band: position of band should be smaller than the size argument");

  int nc = n-(p<0? -p : p);
  std::vector<int> col(nc);
  int offset = max(p, 0);
  for (int i=0; i<nc; i++) {
    col[i] = i+offset;
  }

  std::vector<int> rowind(n+1);
  offset = min(p, 0);
  for (int i=0; i<n+1; i++) {
    rowind[i] = max(min(i+offset, nc), 0);
  }

  return CRSSparsity(n, n, col, rowind);
}
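// Illustrative sketch (not part of the original source): sp_band(n,p) marks
// the p-th diagonal of an n-by-n matrix, with p>0 above and p<0 below the
// main diagonal. Hypothetical helper, assuming CRSSparsity::size() returns
// the number of structural nonzeros.
static void example_sp_band() {
  CRSSparsity sp = sp_band(4, 1); // first superdiagonal of a 4-by-4 matrix
  // Pattern:  . x . .
  //           . . x .
  //           . . . x
  //           . . . .
  // Internally: col = {1,2,3}, rowind = {0,1,2,3,3}
  casadi_assert(sp.size() == 3); // n - |p| nonzeros
}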
Densification::Densification(const MX& x) {
  setDependencies(x);
  setSparsity(CRSSparsity(x.size1(), x.size2(), true));
}
SymbolicMX::SymbolicMX(const std::string& name, int n, int m) : name_(name) {
  setSparsity(CRSSparsity(n, m, true));
}
void SQPInternal::init() {
  // Call the init method of the base class
  NLPSolverInternal::init();

  // Read options
  maxiter_ = getOption("maxiter");
  maxiter_ls_ = getOption("maxiter_ls");
  c1_ = getOption("c1");
  beta_ = getOption("beta");
  merit_memsize_ = getOption("merit_memory");
  lbfgs_memory_ = getOption("lbfgs_memory");
  tol_pr_ = getOption("tol_pr");
  tol_du_ = getOption("tol_du");
  regularize_ = getOption("regularize");

  if (getOption("hessian_approximation")=="exact")
    hess_mode_ = HESS_EXACT;
  else if (getOption("hessian_approximation")=="limited-memory")
    hess_mode_ = HESS_BFGS;

  if (hess_mode_==HESS_EXACT && H_.isNull()) {
    if (!getOption("generate_hessian")) {
      casadi_error("SQPInternal::init: you set option 'hessian_approximation' to 'exact', "
                   "but no Hessian was supplied. Try with option \"generate_hessian\".");
    }
  }

  // If the Hessian is generated, we use the exact Hessian by default
  if (bool(getOption("generate_hessian"))) {
    setOption("hessian_approximation", "exact");
  }

  // Allocate a QP solver
  CRSSparsity H_sparsity = hess_mode_==HESS_EXACT ? H_.output().sparsity() : sp_dense(n_, n_);
  H_sparsity = H_sparsity + DMatrix::eye(n_).sparsity();
  CRSSparsity A_sparsity = J_.isNull() ? CRSSparsity(0, n_, false) : J_.output().sparsity();

  QPSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(H_sparsity, A_sparsity);

  // Set options if provided
  if (hasSetOption("qp_solver_options")) {
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }
  qp_solver_.init();

  // Lagrange multipliers of the NLP
  mu_.resize(m_);
  mu_x_.resize(n_);

  // Lagrange gradient in the next iterate
  gLag_.resize(n_);
  gLag_old_.resize(n_);

  // Current linearization point
  x_.resize(n_);
  x_cand_.resize(n_);
  x_old_.resize(n_);

  // Constraint function value
  gk_.resize(m_);
  gk_cand_.resize(m_);

  // Hessian approximation
  Bk_ = DMatrix(H_sparsity);

  // Jacobian
  Jk_ = DMatrix(A_sparsity);

  // Bounds of the QP
  qp_LBA_.resize(m_);
  qp_UBA_.resize(m_);
  qp_LBX_.resize(n_);
  qp_UBX_.resize(n_);

  // QP solution
  dx_.resize(n_);
  qp_DUAL_X_.resize(n_);
  qp_DUAL_A_.resize(m_);

  // Gradient of the objective
  gf_.resize(n_);

  // Create Hessian update function
  if (hess_mode_ == HESS_BFGS) {
    // Create expressions corresponding to Bk, x, x_old, gLag and gLag_old
    SXMatrix Bk = ssym("Bk", H_sparsity);
    SXMatrix x = ssym("x", input(NLP_X_INIT).sparsity());
    SXMatrix x_old = ssym("x_old", x.sparsity());
    SXMatrix gLag = ssym("gLag", x.sparsity());
    SXMatrix gLag_old = ssym("gLag_old", x.sparsity());

    SXMatrix sk = x - x_old;
    SXMatrix yk = gLag - gLag_old;
    SXMatrix qk = mul(Bk, sk);

    // Calculate the damping factor omega and the damped BFGS update
    SXMatrix skBksk = inner_prod(sk, qk);
    SXMatrix omega = if_else(inner_prod(yk, sk) < 0.2 * inner_prod(sk, qk),
                             0.8 * skBksk / (skBksk - inner_prod(sk, yk)),
                             1);
    yk = omega * yk + (1 - omega) * qk;
    SXMatrix theta = 1. / inner_prod(sk, yk);
    SXMatrix phi = 1. / inner_prod(qk, sk);
    SXMatrix Bk_new = Bk + theta * mul(yk, trans(yk)) - phi * mul(qk, trans(qk));

    // Inputs of the BFGS update function
    vector<SXMatrix> bfgs_in(BFGS_NUM_IN);
    bfgs_in[BFGS_BK] = Bk;
    bfgs_in[BFGS_X] = x;
    bfgs_in[BFGS_X_OLD] = x_old;
    bfgs_in[BFGS_GLAG] = gLag;
    bfgs_in[BFGS_GLAG_OLD] = gLag_old;
    bfgs_ = SXFunction(bfgs_in, Bk_new);
    bfgs_.setOption("number_of_fwd_dir", 0);
    bfgs_.setOption("number_of_adj_dir", 0);
    bfgs_.init();

    // Initial Hessian approximation
    B_init_ = DMatrix::eye(n_);
  }

  // Header
  if (bool(getOption("print_header"))) {
    cout << "-------------------------------------------" << endl;
    cout << "This is CasADi::SQPMethod." << endl;
    switch (hess_mode_) {
      case HESS_EXACT:
        cout << "Using exact Hessian" << endl;
        break;
      case HESS_BFGS:
        cout << "Using limited memory BFGS Hessian approximation" << endl;
        break;
    }
    cout << endl;
    cout << "Number of variables:                       " << setw(9) << n_ << endl;
    cout << "Number of constraints:                     " << setw(9) << m_ << endl;
    cout << "Number of nonzeros in constraint Jacobian: " << setw(9) << A_sparsity.size() << endl;
    cout << "Number of nonzeros in Lagrangian Hessian:  " << setw(9) << H_sparsity.size() << endl;
    cout << endl;
  }
}
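// Note (added for clarity, not in the original source): the update assembled
// in the HESS_BFGS branch above is Powell's damped BFGS formula. With
// s = x - x_old, y = gLag - gLag_old and q = B*s, the damping factor is
//
//   omega = 0.8 * s'Bs / (s'Bs - s'y)   if s'y < 0.2 * s'Bs,   else 1,
//
// the damped gradient difference replaces the raw one,
//
//   y <- omega*y + (1 - omega)*q,
//
// and the approximation is updated as
//
//   B_new = B + (y*y')/(s'y) - (q*q')/(s'q),
//
// which keeps B_new positive definite even when the raw curvature s'y is
// small or negative.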
CRSSparsity sp_sparse(const std::pair<int,int>& nm) {
  return CRSSparsity(nm.first, nm.second, false);
}

CRSSparsity sp_dense(const std::pair<int,int>& nm) {
  return CRSSparsity(nm.first, nm.second, true);
}

CRSSparsity sp_sparse(int n, int m) {
  return CRSSparsity(n, m, false);
}

CRSSparsity sp_dense(int n, int m) {
  return CRSSparsity(n, m, true);
}
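// Illustrative sketch (not part of the original source): sp_dense allocates
// every entry as structurally nonzero, sp_sparse allocates none. Hypothetical
// helper, assuming CRSSparsity::size() (number of nonzeros) and
// CRSSparsity::numel() (total number of entries).
static void example_sp_dense_sparse() {
  CRSSparsity d = sp_dense(2, 3);
  CRSSparsity s = sp_sparse(2, 3);
  casadi_assert(d.size() == 6 && d.numel() == 6); // fully dense 2-by-3
  casadi_assert(s.size() == 0 && s.numel() == 6); // structurally all-zero 2-by-3
}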