Example #1
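This first example is the init() routine of CasADi's NLPSolverInternal base class. It reads the solver options, checks the dimensions and sparsity of the objective function F and the constraint function G, optionally expands them into scalar (SX) operations, generates the Hessian of the Lagrangian and the constraint Jacobian when requested, and finally allocates the NLP input and output buffers before calling FXInternal::init().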
void NLPSolverInternal::init(){
  // Read options
  verbose_ = getOption("verbose");
  gauss_newton_ = getOption("gauss_newton");
  
  // Initialize the functions
  casadi_assert_message(!F_.isNull(),"No objective function");
  if(!F_.isInit()){
    F_.init();
    log("Objective function initialized");
  }
  if(!G_.isNull() && !G_.isInit()){
    G_.init();
    log("Constraint function initialized");
  }

  // Get dimensions
  n_ = F_.input(0).numel();
  m_ = G_.isNull() ? 0 : G_.output(0).numel();

  parametric_ = getOption("parametric");
  
  if (parametric_) {
    casadi_assert_message(F_.getNumInputs()==2, "Wrong number of input arguments to F for parametric NLP. Must be 2, but got " << F_.getNumInputs());
  } else {
    casadi_assert_message(F_.getNumInputs()==1, "Wrong number of input arguments to F for non-parametric NLP. Must be 1, but got " << F_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
  }

  // Basic sanity checks
  casadi_assert_message(F_.getNumInputs()==1 || F_.getNumInputs()==2, "Wrong number of input arguments to F. Must be 1 or 2");
  
  if (F_.getNumInputs()==2) parametric_=true;
  casadi_assert_message(getOption("ignore_check_vec") || gauss_newton_ || F_.input().size2()==1,
     "To avoid confusion, the input argument to F must be vector. You supplied " << F_.input().dimString() << endl <<
     " We suggest you make the following changes:" << endl <<
     "   -  F is an SXFunction:  SXFunction([X],[rhs]) -> SXFunction([vec(X)],[rhs])" << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     "   -  F is an MXFunction:  MXFunction([X],[rhs]) -> " <<  endl <<
     "                                     X_vec = MX(\"X\",vec(X.sparsity())) " << endl <<
     "                                     F_vec = MXFunction([X_flat],[F.call([X_flat.reshape(X.sparsity())])[0]]) " << endl <<
     "             or            F -                   ->  F = vec(F) " << 
     " You may ignore this warning by setting the 'ignore_check_vec' option to true." << endl
  );
  
  casadi_assert_message(F_.getNumOutputs()>=1, "Wrong number of output arguments to F");
  casadi_assert_message(gauss_newton_  || F_.output().scalar(), "Output argument of F not scalar.");
  casadi_assert_message(F_.output().dense(), "Output argument of F not dense.");
  casadi_assert_message(F_.input().dense(), "Input argument of F must be dense. You supplied " << F_.input().dimString());
  
  if(!G_.isNull()) {
    if (parametric_) {
      casadi_assert_message(G_.getNumInputs()==2, "Wrong number of input arguments to G for parametric NLP. Must be 2, but got " << G_.getNumInputs());
    } else {
      casadi_assert_message(G_.getNumInputs()==1, "Wrong number of input arguments to G for non-parametric NLP. Must be 1, but got " << G_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(G_.getNumOutputs()>=1, "Wrong number of output arguments to G");
    casadi_assert_message(G_.input().numel()==n_, "Inconsistent dimensions");
    casadi_assert_message(G_.input().sparsity()==F_.input().sparsity(), "F and G input dimension must match. F " << F_.input().dimString() << ". G " << G_.input().dimString());
  }
  
  // Find out if we are to expand the objective function in terms of scalar operations
  bool expand_f = getOption("expand_f");
  if(expand_f){
    log("Expanding objective function");
    
    // Cast to MXFunction
    MXFunction F_mx = shared_cast<MXFunction>(F_);
    if(F_mx.isNull()){
      casadi_warning("Cannot expand objective function as it is not an MXFunction");
    } else {
      // Use the input scheme of G if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(!G_.isNull() && F_.getNumInputs()==G_.getNumInputs()){
        inputv = G_.symbolicInputSX();
      } else {
        inputv = F_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      F_ = F_mx.expand(inputv);
      F_.setOption("number_of_fwd_dir",F_mx.getOption("number_of_fwd_dir"));
      F_.setOption("number_of_adj_dir",F_mx.getOption("number_of_adj_dir"));
      F_.init();
    }
  }
  
  
  // Find out if we are to expand the constraint function in terms of scalar operations
  bool expand_g = getOption("expand_g");
  if(expand_g){
    log("Expanding constraint function");
    
    // Cast to MXFunction
    MXFunction G_mx = shared_cast<MXFunction>(G_);
    if(G_mx.isNull()){
      casadi_warning("Cannot expand constraint function as it is not an MXFunction");
    } else {
      // Use the input scheme of F if possible (it might be an SXFunction)
      vector<SXMatrix> inputv;
      if(F_.getNumInputs()==G_.getNumInputs()){
        inputv = F_.symbolicInputSX();
      } else {
        inputv = G_.symbolicInputSX();
      }
      
      // Try to expand the MXFunction
      G_ = G_mx.expand(inputv);
      G_.setOption("number_of_fwd_dir",G_mx.getOption("number_of_fwd_dir"));
      G_.setOption("number_of_adj_dir",G_mx.getOption("number_of_adj_dir"));
      G_.init();
    }
  }
  
  // Find out if we are to generate the Hessian of the Lagrangian
  bool generate_hessian = getOption("generate_hessian");
  if(generate_hessian && H_.isNull()){
    casadi_assert_message(!gauss_newton_,"Automatic generation of Gauss-Newton Hessian not yet supported");
    log("Generating Hessian");
    
    // Simple if unconstrained
    if(G_.isNull()){
      // Create Hessian of the objective function
      FX HF = F_.hessian();
      HF.init();
      
      // Symbolic inputs of HF
      vector<MX> HF_in = F_.symbolicInput();
      
      // Lagrange multipliers
      MX lam("lam",0);
      
      // Objective function scaling
      MX sigma("sigma");
      
      // Inputs of the Hessian function
      vector<MX> H_in = HF_in;
      H_in.insert(H_in.begin()+1, lam);
      H_in.insert(H_in.begin()+2, sigma);

      // Get an expression for the Hessian of F
      MX hf = HF.call(HF_in).at(0);
      
      // Create the scaled Hessian function
      H_ = MXFunction(H_in, sigma*hf);
      log("Unconstrained Hessian function generated");
      
    } else { // !G_.isNull()
      
      // Check if the functions are SXFunctions
      SXFunction F_sx = shared_cast<SXFunction>(F_);
      SXFunction G_sx = shared_cast<SXFunction>(G_);
      
      // Efficient if both functions are SXFunction
      if(!F_sx.isNull() && !G_sx.isNull()){
        // Expression for f and g
        SXMatrix f = F_sx.outputSX();
        SXMatrix g = G_sx.outputSX();
        
        // Numeric hessian
        bool f_num_hess = F_sx.getOption("numeric_hessian");
        bool g_num_hess = G_sx.getOption("numeric_hessian");
        
        // Number of derivative directions
        int f_num_fwd = F_sx.getOption("number_of_fwd_dir");
        int g_num_fwd = G_sx.getOption("number_of_fwd_dir");
        int f_num_adj = F_sx.getOption("number_of_adj_dir");
        int g_num_adj = G_sx.getOption("number_of_adj_dir");
        
        // Substitute symbolic variables in f if different input variables from g
        if(!isEqual(F_sx.inputSX(),G_sx.inputSX())){
          f = substitute(f,F_sx.inputSX(),G_sx.inputSX());
        }
        
        // Lagrange multipliers
        SXMatrix lam = ssym("lambda",g.size1());

        // Objective function scaling
        SXMatrix sigma = ssym("sigma");        
        
        // Lagrangian function
        vector<SXMatrix> lfcn_in(parametric_? 4: 3);
        lfcn_in[0] = G_sx.inputSX();
        lfcn_in[1] = lam;
        lfcn_in[2] = sigma;
        if (parametric_) lfcn_in[3] = G_sx.inputSX(1);
        SXFunction lfcn(lfcn_in, sigma*f + inner_prod(lam,g));
        lfcn.setOption("verbose",getOption("verbose"));
        lfcn.setOption("numeric_hessian",f_num_hess || g_num_hess);
        lfcn.setOption("number_of_fwd_dir",std::min(f_num_fwd,g_num_fwd));
        lfcn.setOption("number_of_adj_dir",std::min(f_num_adj,g_num_adj));
        lfcn.init();
        
        // Hessian of the Lagrangian
        H_ = static_cast<FX&>(lfcn).hessian();
        H_.setOption("verbose",getOption("verbose"));
        log("SX Hessian function generated");
        
      } else { // not both SXFunction
        // Check if the functions are MXFunctions
        MXFunction F_mx = shared_cast<MXFunction>(F_);
        MXFunction G_mx = shared_cast<MXFunction>(G_);
        
        // If they are, check if the arguments are the same
        if(!F_mx.isNull() && !G_mx.isNull() && isEqual(F_mx.inputMX(),G_mx.inputMX())){
          casadi_warning("Exact Hessian calculation for MX is still experimental");
          
          // Expression for f and g
          MX f = F_mx.outputMX();
          MX g = G_mx.outputMX();
          
          // Lagrange multipliers
          MX lam("lam",g.size1());
      
          // Objective function scaling
          MX sigma("sigma");

          // Inputs of the Lagrangian function
          vector<MX> lfcn_in(parametric_? 4:3);
          lfcn_in[0] = G_mx.inputMX();
          lfcn_in[1] = lam;
          lfcn_in[2] = sigma;
          if (parametric_) lfcn_in[3] = G_mx.inputMX(1);

          // Lagrangian function
          MXFunction lfcn(lfcn_in,sigma*f+ inner_prod(lam,g));
          lfcn.init();
	  log("SX Lagrangian function generated");
          
/*          cout << "countNodes(lfcn.outputMX()) = " << countNodes(lfcn.outputMX()) << endl;*/
      
          bool adjoint_mode = true;
          if(adjoint_mode){
          
            // Gradient of the lagrangian
            MX gL = lfcn.grad();
            log("MX Lagrangian gradient generated");

            MXFunction glfcn(lfcn_in,gL);
            glfcn.init();
            log("MX Lagrangian gradient function initialized");
//           cout << "countNodes(glfcn.outputMX()) = " << countNodes(glfcn.outputMX()) << endl;

            // Get Hessian sparsity
            CRSSparsity H_sp = glfcn.jacSparsity();
            log("MX Lagrangian Hessian sparsity determined");
            
            // Uni-directional coloring (note, the hessian is symmetric)
            CRSSparsity coloring = H_sp.unidirectionalColoring(H_sp);
            log("MX Lagrangian Hessian coloring determined");

            // Number of colors needed is the number of rows
            int nfwd_glfcn = coloring.size1();
            log("MX Lagrangian gradient function number of sensitivity directions determined");

            glfcn.setOption("number_of_fwd_dir",nfwd_glfcn);
            glfcn.updateNumSens();
            log("MX Lagrangian gradient function number of sensitivity directions updated");
            
            // Hessian of the Lagrangian
            H_ = glfcn.jacobian();
          } else {

            // Hessian of the Lagrangian
            H_ = lfcn.hessian();
            
          }
          log("MX Lagrangian Hessian function generated");
          
        } else {
          casadi_assert_message(0, "Automatic calculation of the exact Hessian is currently only supported when F and G are both SXFunction or both MXFunction");
        }
      } // !F_sx.isNull() && !G_sx.isNull()
    } // G_.isNull()
  } // generate_hessian && H_.isNull()
  if(!H_.isNull() && !H_.isInit()) {
    H_.init();
    log("Hessian function initialized");
  }

  // Create a Jacobian if one does not already exist
  bool generate_jacobian = getOption("generate_jacobian");
  if(generate_jacobian && !G_.isNull() && J_.isNull()){
    log("Generating Jacobian");
    J_ = G_.jacobian();
    
    // Use live variables if SXFunction
    if(!shared_cast<SXFunction>(J_).isNull()){
      J_.setOption("live_variables",true);
    }
    log("Jacobian function generated");
  }
    
  if(!J_.isNull() && !J_.isInit()){
    J_.init();
    log("Jacobian function initialized");
  }

  
  if(!H_.isNull()) {
    if (parametric_) {
      casadi_assert_message(H_.getNumInputs()>=2, "Wrong number of input arguments to H for parametric NLP. Must be at least 2, but got " << H_.getNumInputs());
    } else {
      casadi_assert_message(H_.getNumInputs()>=1, "Wrong number of input arguments to H for non-parametric NLP. Must be at least 1, but got " << H_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(H_.getNumOutputs()>=1, "Wrong number of output arguments to H");
    casadi_assert_message(H_.input(0).numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size1()==n_,"Inconsistent dimensions");
    casadi_assert_message(H_.output().size2()==n_,"Inconsistent dimensions");
  }

  if(!J_.isNull()){
    if (parametric_) {
      casadi_assert_message(J_.getNumInputs()==2, "Wrong number of input arguments to J for parametric NLP. Must be 2, but got " << J_.getNumInputs());
    } else {
      casadi_assert_message(J_.getNumInputs()==1, "Wrong number of input arguments to J for non-parametric NLP. Must be 1, but got " << J_.getNumInputs() << " instead. Do you perhaps intend to use fixed parameters? Then use the 'parametric' option.");
    }
    casadi_assert_message(J_.getNumOutputs()>=1, "Wrong number of output arguments to J");
    casadi_assert_message(J_.input().numel()==n_,"Inconsistent dimensions");
    casadi_assert_message(J_.output().size2()==n_,"Inconsistent dimensions");
  }

  if (parametric_) {
    sp_p = F_->input(1).sparsity();
    
    if (!G_.isNull()) casadi_assert_message(sp_p == G_->input(G_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString() << ", while G has " << G_->input(G_->getNumInputs()-1).dimString());
    if (!H_.isNull()) casadi_assert_message(sp_p == H_->input(H_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString() << ", while H has " << H_->input(H_->getNumInputs()-1).dimString());
    if (!J_.isNull()) casadi_assert_message(sp_p == J_->input(J_->getNumInputs()-1).sparsity(),"Parametric NLP has inconsistent parameter dimensions. F has " << sp_p.dimString() << ", while J has " << J_->input(J_->getNumInputs()-1).dimString());
  }
  
  // Infinity
  double inf = numeric_limits<double>::infinity();
  
  // Allocate space for inputs
  input_.resize(NLP_NUM_IN - (parametric_? 0 : 1));
  input(NLP_X_INIT)      = DMatrix(n_,1,0);
  input(NLP_LBX)         = DMatrix(n_,1,-inf);
  input(NLP_UBX)         = DMatrix(n_,1, inf);
  input(NLP_LBG)         = DMatrix(m_,1,-inf);
  input(NLP_UBG)         = DMatrix(m_,1, inf);
  input(NLP_LAMBDA_INIT) = DMatrix(m_,1,0);
  if (parametric_) input(NLP_P) = DMatrix(sp_p,0);
  
  // Allocate space for outputs
  output_.resize(NLP_NUM_OUT);
  output(NLP_X_OPT)      = DMatrix(n_,1,0);
  output(NLP_COST)       = DMatrix(1,1,0);
  output(NLP_LAMBDA_X)   = DMatrix(n_,1,0);
  output(NLP_LAMBDA_G)   = DMatrix(m_,1,0);
  output(NLP_G)          = DMatrix(m_,1,0);
  
  if (hasSetOption("iteration_callback")) {
   callback_ = getOption("iteration_callback");
   if (!callback_.isNull()) {
     if (!callback_.isInit()) callback_.init();
     casadi_assert_message(callback_.getNumOutputs()==1, "Callback function should have one output, a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.output(0).size()==1, "Callback function output should be a scalar that indicates whether to break. 0 = continue");
     casadi_assert_message(callback_.getNumInputs()==NLP_NUM_OUT, "Callback function should have the output scheme of NLPSolver as input scheme, i.e. " << NLP_NUM_OUT << " inputs instead of the " << callback_.getNumInputs() << " you provided.");
     for (int i=0;i<NLP_NUM_OUT;i++) {
       casadi_assert_message(callback_.input(i).sparsity()==output(i).sparsity(),
         "Callback function should have the output scheme of NLPSolver as input scheme. " << 
         "Input #" << i << " (" << getSchemeEntryEnumName(SCHEME_NLPOutput,i) <<  " aka '" << getSchemeEntryName(SCHEME_NLPOutput,i) << "') was found to be " << callback_.input(i).dimString() << " instead of expected " << output(i).dimString() << "."
       );
       callback_.input(i).setAll(0);
     }
   }
  }
  
  callback_step_ = getOption("iteration_callback_step");

  // Call the initialization method of the base class
  FXInternal::init();
}
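For context, below is a minimal usage sketch showing how a concrete solver built on NLPSolverInternal might be driven. It is not taken from the CasADi sources: the include path and the MySolver class are hypothetical placeholders, and the single-expression SXFunction constructor is assumed to be available in this CasADi version; only option names and input/output indices that actually appear in init() above are used.

#include <casadi/casadi.hpp>   // assumed umbrella header; adjust to the actual CasADi layout
using namespace CasADi;
using namespace std;

int main(){
  // Decision variable: a dense column vector, as required by the checks in init()
  SXMatrix x = ssym("x", 3);

  // Objective f(x) = 0.5*x'x and constraint g(x) = x (simple bound-type constraints)
  SXFunction F(x, inner_prod(x, x)/2);
  SXFunction G(x, x);

  // MySolver stands in for any subclass whose init() ends up in NLPSolverInternal::init()
  MySolver solver(F, G);
  solver.setOption("generate_hessian", true);   // exercises the Hessian branch above
  solver.setOption("generate_jacobian", true);  // exercises the Jacobian branch above
  solver.init();

  // Fill the inputs allocated at the end of init()
  solver.input(NLP_X_INIT).setAll(1.0);
  solver.input(NLP_LBG).setAll(0.0);
  solver.input(NLP_UBG).setAll(2.0);
  solver.evaluate();

  cout << "x_opt = " << solver.output(NLP_X_OPT) << endl;
  cout << "f_opt = " << solver.output(NLP_COST) << endl;
  return 0;
}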
Example #2
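The second example is the init() routine of LiftedSQPInternal, a lifted SQP method layered on the NLP solver base class. It splits the decision vector into independent variables u and lifted (dependent) variables v, builds the Lagrangian and its gradient symbolically, and generates the residual function, the linearization function and the step-expansion function used by the lifted Newton iteration, before allocating a QP solver for the condensed subproblem.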
void LiftedSQPInternal::init(){
  // Call the init method of the base class
  NlpSolverInternal::init();

  // Number of lifted variables
  nv = getOption("num_lifted");
  if(verbose_){
    cout << "Initializing SQP method with " << nx_ << " variables and " << ng_ << " constraints." << endl;
    cout << "Lifting " << nv << " variables." << endl;
    if(gauss_newton_){
      cout << "Gauss-Newton objective with " << F_.input().numel() << " terms." << endl;
    }
  }
  
  // Read options
  max_iter_ = getOption("max_iter");
  max_iter_ls_ = getOption("max_iter_ls");
  toldx_ = getOption("toldx");
  tolgl_ = getOption("tolgl");
  sigma_ = getOption("sigma");
  rho_ = getOption("rho");
  mu_safety_ = getOption("mu_safety");
  eta_ = getOption("eta");
  tau_ = getOption("tau");
    
  // Assume SXFunction for now
  SXFunction ffcn = shared_cast<SXFunction>(F_);
  casadi_assert(!ffcn.isNull());
  SXFunction gfcn = shared_cast<SXFunction>(G_);
  casadi_assert(!gfcn.isNull());
  
  // Extract the free variables and split into independent and dependent variables
  SX x = ffcn.inputExpr(0);
  int nx = x.size();
  nu = nx-nv;
  SX u = x[Slice(0,nu)];
  SX v = x[Slice(nu,nu+nv)];

  // Extract the constraint equations and split into constraints and definitions of dependent variables
  SX f1 = ffcn.outputExpr(0);
  int nf1 = f1.numel();
  SX g = gfcn.outputExpr(0);
  int nf2 = g.numel()-nv;
  SX v_eq = g(Slice(0,nv));
  SX f2 = g(Slice(nv,nv+nf2));
  
  // Definition of v
  SX v_def = v_eq + v;

  // Objective function
  SX f;
  
  // Multipliers
  SX lam_x, lam_g, lam_f2;
  if(gauss_newton_){
    
    // Least square objective
    f = inner_prod(f1,f1)/2;
    
  } else {
    
    // Scalar objective function
    f = f1;
    
    // Lagrange multipliers for the simple bounds on u
    SX lam_u = ssym("lam_u",nu);
    
    // Lagrange multipliers for the simple bounds on v
    SX lam_v = ssym("lam_v",nv);
    
    // Lagrange multipliers for the simple bounds on x
    lam_x = vertcat(lam_u,lam_v);

    // Lagrange multipliers corresponding to the definition of the dependent variables
    SX lam_v_eq = ssym("lam_v_eq",nv);

    // Lagrange multipliers for the nonlinear constraints that aren't eliminated
    lam_f2 = ssym("lam_f2",nf2);

    if(verbose_){
      cout << "Allocated intermediate variables." << endl;
    }
    
    // Lagrange multipliers for constraints
    lam_g = vertcat(lam_v_eq,lam_f2);
    
    // Lagrangian function
    SX lag = f + inner_prod(lam_x,x);
    if(!f2.empty()) lag += inner_prod(lam_f2,f2);
    if(!v.empty()) lag += inner_prod(lam_v_eq,v_def);
    
    // Gradient of the Lagrangian
    SX lgrad = casadi::gradient(lag,x);
    if(!v.empty()) lgrad -= vertcat(SX::zeros(nu),lam_v_eq); // Put here to ensure that lgrad is of the form "h_extended -v_extended"
    makeDense(lgrad);
    if(verbose_){
      cout << "Generated the gradient of the Lagrangian." << endl;
    }

    // Condensed gradient of the Lagrangian
    f1 = lgrad[Slice(0,nu)];
    nf1 = nu;
    
    // Gradient of h
    SX v_eq_grad = lgrad[Slice(nu,nu+nv)];
    
    // Reverse lam_v_eq and v_eq_grad
    SX v_eq_grad_reversed = v_eq_grad;
    copy(v_eq_grad.rbegin(),v_eq_grad.rend(),v_eq_grad_reversed.begin());
    SX lam_v_eq_reversed = lam_v_eq;
    copy(lam_v_eq.rbegin(),lam_v_eq.rend(),lam_v_eq_reversed.begin());
    
    // Augment h and lam_v_eq
    v_eq.append(v_eq_grad_reversed);
    v.append(lam_v_eq_reversed);
  }

  // Residual function G
  SXVector G_in(G_NUM_IN);
  G_in[G_X] = x;
  G_in[G_LAM_X] = lam_x;
  G_in[G_LAM_G] = lam_g;

  SXVector G_out(G_NUM_OUT);
  G_out[G_D] = v_eq;
  G_out[G_G] = g;
  G_out[G_F] = f;

  rfcn_ = SXFunction(G_in,G_out);
  rfcn_.setOption("number_of_fwd_dir",0);
  rfcn_.setOption("number_of_adj_dir",0);
  rfcn_.setOption("live_variables",true);
  rfcn_.init();
  if(verbose_){
    cout << "Generated residual function ( " << shared_cast<SXFunction>(rfcn_).getAlgorithmSize() << " nodes)." << endl;
  }
  
  // Difference vector d
  SX d = ssym("d",nv);
  if(!gauss_newton_){
    vector<SX> dg = ssym("dg",nv).data();
    reverse(dg.begin(),dg.end());
    d.append(dg);
  }

  // Substitute the dependent variables v out of the expressions
  SX d_def = (v_eq + v)-d;
  SXVector ex(3);
  ex[0] = f1;
  ex[1] = f2;
  ex[2] = f;
  substituteInPlace(v, d_def, ex, false);
  SX f1_z = ex[0];
  SX f2_z = ex[1];
  SX f_z = ex[2];
  
  // Modified function Z
  enum ZIn{Z_U,Z_D,Z_LAM_X,Z_LAM_F2,Z_NUM_IN};
  SXVector zfcn_in(Z_NUM_IN);
  zfcn_in[Z_U] = u;
  zfcn_in[Z_D] = d;
  zfcn_in[Z_LAM_X] = lam_x;
  zfcn_in[Z_LAM_F2] = lam_f2;
  
  enum ZOut{Z_D_DEF,Z_F12,Z_NUM_OUT};
  SXVector zfcn_out(Z_NUM_OUT);
  zfcn_out[Z_D_DEF] = d_def;
  zfcn_out[Z_F12] = vertcat(f1_z,f2_z);
  
  SXFunction zfcn(zfcn_in,zfcn_out);
  zfcn.init();
  if(verbose_){
    cout << "Generated reconstruction function ( " << zfcn.getAlgorithmSize() << " nodes)." << endl;
  }

  // Matrix B in the lifted Newton method and its blocks B1 and B2
  SX B = zfcn.jac(Z_U,Z_F12);
  SX B1 = B(Slice(0,nf1),Slice(0,B.size2()));
  SX B2 = B(Slice(nf1,B.size1()),Slice(0,B.size2()));
  if(verbose_){
    cout << "Formed B1 (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros) " <<
    "and B2 (dimension " << B2.size1() << "-by-" << B2.size2() << ", "<< B2.size() << " nonzeros)." << endl;
  }
  
  // Step in u
  SX du = ssym("du",nu);
  SX dlam_f2 = ssym("dlam_f2",lam_f2.sparsity());
  
  SX b1 = f1_z;
  SX b2 = f2_z;
  SX e;
  if(nv > 0){
    
    // Directional derivative of Z
    vector<vector<SX> > Z_fwdSeed(2,zfcn_in);
    vector<vector<SX> > Z_fwdSens(2,zfcn_out);
    vector<vector<SX> > Z_adjSeed;
    vector<vector<SX> > Z_adjSens;
    
    Z_fwdSeed[0][Z_U].setZero();
    Z_fwdSeed[0][Z_D] = -d;
    Z_fwdSeed[0][Z_LAM_X].setZero();
    Z_fwdSeed[0][Z_LAM_F2].setZero();
    
    Z_fwdSeed[1][Z_U] = du;
    Z_fwdSeed[1][Z_D] = -d;
    Z_fwdSeed[1][Z_LAM_X].setZero();
    Z_fwdSeed[1][Z_LAM_F2] = dlam_f2;
    
    zfcn.eval(zfcn_in,zfcn_out,Z_fwdSeed,Z_fwdSens,Z_adjSeed,Z_adjSens);
    
    b1 += Z_fwdSens[0][Z_F12](Slice(0,nf1));
    b2 += Z_fwdSens[0][Z_F12](Slice(nf1,B.size1()));
    e = Z_fwdSens[1][Z_D_DEF];
  }
  if(verbose_){
    cout << "Formed b1 (dimension " << b1.size1() << "-by-" << b1.size2() << ", "<< b1.size() << " nonzeros) " <<
    "and b2 (dimension " << b2.size1() << "-by-" << b2.size2() << ", "<< b2.size() << " nonzeros)." << endl;
  }
  
  // Generate Gauss-Newton Hessian
  if(gauss_newton_){
    b1 = mul(trans(B1),b1);
    B1 = mul(trans(B1),B1);
    if(verbose_){
      cout << "Gauss Newton Hessian (dimension " << B1.size1() << "-by-" << B1.size2() << ", "<< B1.size() << " nonzeros)." << endl;
    }
  }
  
  // Make sure b1 and b2 are dense vectors
  makeDense(b1);
  makeDense(b2);
  
  // Quadratic approximation
  SXVector lfcn_in(LIN_NUM_IN);
  lfcn_in[LIN_X] = x;
  lfcn_in[LIN_D] = d;
  lfcn_in[LIN_LAM_X] = lam_x;
  lfcn_in[LIN_LAM_G] = lam_g;
  
  SXVector lfcn_out(LIN_NUM_OUT);
  lfcn_out[LIN_F1] = b1;
  lfcn_out[LIN_J1] = B1;
  lfcn_out[LIN_F2] = b2;
  lfcn_out[LIN_J2] = B2;
  lfcn_ = SXFunction(lfcn_in,lfcn_out);
//   lfcn_.setOption("verbose",true);
  lfcn_.setOption("number_of_fwd_dir",0);
  lfcn_.setOption("number_of_adj_dir",0);
  lfcn_.setOption("live_variables",true);
  lfcn_.init();
  if(verbose_){
    cout << "Generated linearization function ( " << shared_cast<SXFunction>(lfcn_).getAlgorithmSize() << " nodes)." << endl;
  }
    
  // Step expansion
  SXVector efcn_in(EXP_NUM_IN);
  copy(lfcn_in.begin(),lfcn_in.end(),efcn_in.begin());
  efcn_in[EXP_DU] = du;
  efcn_in[EXP_DLAM_F2] = dlam_f2;
  efcn_ = SXFunction(efcn_in,e);
  efcn_.setOption("number_of_fwd_dir",0);
  efcn_.setOption("number_of_adj_dir",0);
  efcn_.setOption("live_variables",true);
  efcn_.init();
  if(verbose_){
    cout << "Generated step expansion function ( " << shared_cast<SXFunction>(efcn_).getAlgorithmSize() << " nodes)." << endl;
  }
  
  // Current guess for the primal solution
  DMatrix &x_k = output(NLP_SOLVER_X);
  
  // Current guess for the dual solution
  DMatrix &lam_x_k = output(NLP_SOLVER_LAM_X);
  DMatrix &lam_g_k = output(NLP_SOLVER_LAM_G);

  // Allocate a QP solver
  QpSolverCreator qp_solver_creator = getOption("qp_solver");
  qp_solver_ = qp_solver_creator(B1.sparsity(),B2.sparsity());
  
  // Set options if provided
  if(hasSetOption("qp_solver_options")){
    Dictionary qp_solver_options = getOption("qp_solver_options");
    qp_solver_.setOption(qp_solver_options);
  }
  
  // Initialize the QP solver
  qp_solver_.init();
  if(verbose_){
    cout << "Allocated QP solver." << endl;
  }

  // Residual
  d_k_ = DMatrix(d.sparsity(),0);
  
  // Primal step
  dx_k_ = DMatrix(x_k.sparsity());

  // Dual step
  dlam_x_k_ = DMatrix(lam_x_k.sparsity());
  dlam_g_k_ = DMatrix(lam_g_k.sparsity());
  
}
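The slicing at the top of init() implies a particular problem layout, sketched below with hypothetical model expressions: the last nv entries of x are the lifted variables, and the first nv rows of G are the lifting residuals v_def(u) - v, followed by the remaining constraints f2.

// Hypothetical lifted NLP with nu = 2 independent and nv = 1 lifted variables
int nu = 2, nv = 1;
SX u = ssym("u", nu);
SX v = ssym("v", nv);
SX x = vertcat(u, v);                 // decision vector ordered as [u; v]

// Lifted intermediate: v is defined by v_def(u) = u'u (an arbitrary example model)
SX v_def = inner_prod(u, u);

// Objective written in terms of u and the lifted variable v
SX f = v + inner_prod(u, u);

// Constraints: the first nv rows are the lifting residual v_def - v, then the rest (f2)
SX g = vertcat(v_def - v, v - 1);

SXFunction ffcn(x, f);
SXFunction gfcn(x, g);

// A solver wrapping LiftedSQPInternal would be constructed from ffcn and gfcn with the
// option "num_lifted" set to nv, so that init() above can split x and g as shown.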