Code Example #1
File: IpRestoIpoptNLP.hpp  Project: imzye/coin
 virtual SmartPtr<const SymMatrixSpace> HessianMatrixSpace() const
 {
     return GetRawPtr(h_space_);
 }
Code Example #2
File: IpRestoIpoptNLP.hpp  Project: imzye/coin
 /** Upper bounds on d */
 virtual SmartPtr<const Vector> d_U() const
 {
     return GetRawPtr(d_U_);
 }
Code Example #3
File: IpRestoIpoptNLP.hpp  Project: imzye/coin
 /** Permutation matrix (d_U_ -> d) */
 virtual SmartPtr<const Matrix> Pd_U() const
 {
     return GetRawPtr(Pd_U_);
 }
Code Example #4
  void RestoIterationOutput::WriteOutput()
  {
    // Get pointers to the Original NLP objects
    const RestoIpoptNLP* resto_ipopt_nlp =
      static_cast<const RestoIpoptNLP*>(&IpNLP());
    DBG_ASSERT(resto_ipopt_nlp);

    SmartPtr<IpoptData> orig_ip_data = &resto_ipopt_nlp->OrigIpData();
    SmartPtr<IpoptNLP> orig_ip_nlp = &resto_ipopt_nlp->OrigIpNLP();
    SmartPtr<IpoptCalculatedQuantities> orig_ip_cq =
      &resto_ipopt_nlp->OrigIpCq();

    // Set the iteration counter for the original NLP to the current value
    Index iter = IpData().iter_count();
    orig_ip_data->Set_iter_count(iter);

    // If a resto_orig_iteration_output object was given, first do the
    // WriteOutput method with that one
    if (IsValid(resto_orig_iteration_output_)) {
      resto_orig_iteration_output_->WriteOutput();
    }

    //////////////////////////////////////////////////////////////////////
    //         First print the summary line for the iteration           //
    //////////////////////////////////////////////////////////////////////

    std::string header =
      "iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls\n";
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "\n\n**************************************************\n");
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "*** Summary of Iteration %d for original NLP:", IpData().iter_count());
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "\n**************************************************\n\n");
    if (IpData().info_iters_since_header() >= 10 && !IsValid(resto_orig_iteration_output_)) {
      // output the header
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN, header.c_str());
      IpData().Set_info_iters_since_header(0);
    }
    else {
      Jnlst().Printf(J_DETAILED, J_MAIN, header.c_str());
    }

    // For now, just print the total NLP error for the restoration
    // phase problem in the dual infeasibility column
    Number inf_du =
      IpCq().curr_dual_infeasibility(NORM_MAX);

    Number mu = IpData().curr_mu();
    Number dnrm = 0.;
    if (IsValid(IpData().delta()) && IsValid(IpData().delta()->x()) && IsValid(IpData().delta()->s())) {
      dnrm = Max(IpData().delta()->x()->Amax(), IpData().delta()->s()->Amax());
    }

    // Set the trial values for the original Data object to the
    // current restoration phase values
    SmartPtr<const Vector> x = IpData().curr()->x();
    const CompoundVector* cx =
      static_cast<const CompoundVector*>(GetRawPtr(x));
    DBG_ASSERT(dynamic_cast<const CompoundVector*>(GetRawPtr(x)));
    SmartPtr<const Vector> s = IpData().curr()->s();
    const CompoundVector* cs =
      static_cast<const CompoundVector*>(GetRawPtr(s));
    DBG_ASSERT(dynamic_cast<const CompoundVector*>(GetRawPtr(s)));

    SmartPtr<IteratesVector> trial = orig_ip_data->trial()->MakeNewContainer();
    trial->Set_x(*cx->GetComp(0));
    trial->Set_s(*cs->GetComp(0));
    orig_ip_data->set_trial(trial);

    // Compute primal infeasibility
    Number inf_pr = 0.0;
    switch (inf_pr_output_) {
    case INTERNAL:
      inf_pr = orig_ip_cq->trial_primal_infeasibility(NORM_MAX);
      break;
    case ORIGINAL:
      inf_pr = orig_ip_cq->unscaled_trial_nlp_constraint_violation(NORM_MAX);
      break;
    }
    // Compute original objective function
    Number f = orig_ip_cq->unscaled_trial_f();

    // Retrieve some information set in the different parts of the algorithm
    char info_iter='r';

    Number alpha_primal = IpData().info_alpha_primal();
    char alpha_primal_char = IpData().info_alpha_primal_char();
    Number alpha_dual = IpData().info_alpha_dual();
    Number regu_x = IpData().info_regu_x();
    char regu_x_buf[8];
    char dashes[]="   - ";
    char *regu_x_ptr;
    if (regu_x==.0) {
      regu_x_ptr = dashes;
    }
    else {
      Snprintf(regu_x_buf, 7, "%5.1f", log10(regu_x));
      regu_x_ptr = regu_x_buf;
    }
    Index ls_count = IpData().info_ls_count();
    const std::string info_string = IpData().info_string();

    Number current_time = 0.0;
    Number last_output = IpData().info_last_output();
    if ((iter % print_frequency_iter_) == 0 &&
        (print_frequency_time_ == 0.0 || last_output < (current_time = WallclockTime()) - print_frequency_time_ || last_output < 0.0)) {
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN,
                     "%4d%c%14.7e %7.2e %7.2e %5.1f %7.2e %5s %7.2e %7.2e%c%3d",
                     iter, info_iter, f, inf_pr, inf_du, log10(mu), dnrm, regu_x_ptr,
                     alpha_dual, alpha_primal, alpha_primal_char,
                    ls_count);
      if (print_info_string_) {
        Jnlst().Printf(J_ITERSUMMARY, J_MAIN, " %s", info_string.c_str());
      }
      else {
        Jnlst().Printf(J_DETAILED, J_MAIN, " %s", info_string.c_str());
      }
      Jnlst().Printf(J_ITERSUMMARY, J_MAIN, "\n");

      IpData().Set_info_last_output(current_time);
      IpData().Inc_info_iters_since_header();
    }

    //////////////////////////////////////////////////////////////////////
    //           Now if desired more detail on the iterates             //
    //////////////////////////////////////////////////////////////////////

    if (Jnlst().ProduceOutput(J_DETAILED, J_MAIN)) {
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n**************************************************\n");
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "*** Beginning Iteration %d from the following point:",
                     IpData().iter_count());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n**************************************************\n\n");

      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "Primal infeasibility for restoration phase problem = %.16e\n",
                     IpCq().curr_primal_infeasibility(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "Dual infeasibility for restoration phase problem   = %.16e\n",
                     IpCq().curr_dual_infeasibility(NORM_MAX));

      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_x||_inf   = %.16e\n", IpData().curr()->x()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_s||_inf   = %.16e\n", IpData().curr()->s()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_y_c||_inf = %.16e\n", IpData().curr()->y_c()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_y_d||_inf = %.16e\n", IpData().curr()->y_d()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_z_L||_inf = %.16e\n", IpData().curr()->z_L()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_z_U||_inf = %.16e\n", IpData().curr()->z_U()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_v_L||_inf = %.16e\n", IpData().curr()->v_L()->Amax());
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "||curr_v_U||_inf = %.16e\n", IpData().curr()->v_U()->Amax());
    }
    if (Jnlst().ProduceOutput(J_MOREDETAILED, J_MAIN)) {
      if (IsValid(IpData().delta())) {
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "\n||delta_x||_inf   = %.16e\n", IpData().delta()->x()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_s||_inf   = %.16e\n", IpData().delta()->s()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_y_c||_inf = %.16e\n", IpData().delta()->y_c()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_y_d||_inf = %.16e\n", IpData().delta()->y_d()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_z_L||_inf = %.16e\n", IpData().delta()->z_L()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_z_U||_inf = %.16e\n", IpData().delta()->z_U()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_v_L||_inf = %.16e\n", IpData().delta()->v_L()->Amax());
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "||delta_v_U||_inf = %.16e\n", IpData().delta()->v_U()->Amax());
      }
      else {
        Jnlst().Printf(J_MOREDETAILED, J_MAIN,
                       "\nNo search direction has been computed yet.\n");
      }
    }
    if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) {
      IpData().curr()->x()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_x");
      IpData().curr()->s()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_s");

      IpData().curr()->y_c()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_y_c");
      IpData().curr()->y_d()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_y_d");

      IpCq().curr_slack_x_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_x_L");
      IpCq().curr_slack_x_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_x_U");
      IpData().curr()->z_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_z_L");
      IpData().curr()->z_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_z_U");

      IpCq().curr_slack_s_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_s_L");
      IpCq().curr_slack_s_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_slack_s_U");
      IpData().curr()->v_L()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_v_L");
      IpData().curr()->v_U()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_v_U");
    }
    if (Jnlst().ProduceOutput(J_MOREVECTOR, J_MAIN)) {
      IpCq().curr_grad_lag_x()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "curr_grad_lag_x");
      IpCq().curr_grad_lag_s()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "curr_grad_lag_s");
      if (IsValid(IpData().delta())) {
        IpData().delta()->Print(Jnlst(), J_MOREVECTOR, J_MAIN, "delta");
      }
    }

    if (Jnlst().ProduceOutput(J_DETAILED, J_MAIN)) {
      Jnlst().Printf(J_DETAILED, J_MAIN,
                     "\n\n***Current NLP Values for Iteration (Restoration phase problem) %d:\n",
                     IpData().iter_count());
      Jnlst().Printf(J_DETAILED, J_MAIN, "\n                                   (scaled)                 (unscaled)\n");
      Jnlst().Printf(J_DETAILED, J_MAIN, "Objective...............: %24.16e  %24.16e\n", IpCq().curr_f(), IpCq().unscaled_curr_f());
      Jnlst().Printf(J_DETAILED, J_MAIN, "Dual infeasibility......: %24.16e  %24.16e\n", IpCq().curr_dual_infeasibility(NORM_MAX), IpCq().unscaled_curr_dual_infeasibility(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Constraint violation....: %24.16e  %24.16e\n", IpCq().curr_nlp_constraint_violation(NORM_MAX), IpCq().unscaled_curr_nlp_constraint_violation(NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Complementarity.........: %24.16e  %24.16e\n", IpCq().curr_complementarity(0., NORM_MAX), IpCq().unscaled_curr_complementarity(0., NORM_MAX));
      Jnlst().Printf(J_DETAILED, J_MAIN, "Overall NLP error.......: %24.16e  %24.16e\n\n", IpCq().curr_nlp_error(), IpCq().unscaled_curr_nlp_error());
    }
    if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) {
      IpCq().curr_grad_f()->Print(Jnlst(), J_VECTOR, J_MAIN, "grad_f");
      IpCq().curr_c()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_c");
      IpCq().curr_d()->Print(Jnlst(), J_VECTOR, J_MAIN, "curr_d");
      IpCq().curr_d_minus_s()->Print(Jnlst(), J_VECTOR, J_MAIN,
                                     "curr_d - curr_s");
    }

    if (Jnlst().ProduceOutput(J_MATRIX, J_MAIN)) {
      IpCq().curr_jac_c()->Print(Jnlst(), J_MATRIX, J_MAIN, "jac_c");
      IpCq().curr_jac_d()->Print(Jnlst(), J_MATRIX, J_MAIN, "jac_d");
      IpData().W()->Print(Jnlst(), J_MATRIX, J_MAIN, "W");
    }

    Jnlst().Printf(J_DETAILED, J_MAIN, "\n\n");
  }
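
For reference, the summary line printed above uses a fixed-width printf format. The following standalone sketch (no Ipopt dependencies; all numeric values are made-up samples) shows how that same format string lays out the columns of one iteration line.

#include <cmath>
#include <cstdio>

// Minimal sketch: format one iteration-summary line with the same format
// string used in RestoIterationOutput::WriteOutput().  All numeric values
// below are made-up sample data.
int main()
{
   int         iter       = 12;
   char        info_iter  = 'r';     // 'r' marks a restoration-phase iteration
   double      f          = 1.2345678e+01;
   double      inf_pr     = 3.4e-03;
   double      inf_du     = 5.6e-02;
   double      mu         = 1.0e-04;
   double      dnrm       = 7.8e-01;
   const char* regu_x_ptr = "   - ";  // dashes when no regularization was used
   double      alpha_du   = 1.0;
   double      alpha_pr   = 9.9e-01;
   char        alpha_char = 'f';
   int         ls_count   = 1;

   std::printf("iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls\n");
   std::printf("%4d%c%14.7e %7.2e %7.2e %5.1f %7.2e %5s %7.2e %7.2e%c%3d\n",
               iter, info_iter, f, inf_pr, inf_du, std::log10(mu), dnrm, regu_x_ptr,
               alpha_du, alpha_pr, alpha_char, ls_count);
   return 0;
}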
Code Example #5
  bool RestoIterateInitializer::SetInitialIterates()
  {
    DBG_START_METH("RestoIterateInitializer::SetInitialIterates",
                   dbg_verbosity);

    // Get a grip on the restoration phase NLP and obtain the pointers
    // to the original NLP data
    SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*> (&IpNLP());
    SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP());
    SmartPtr<IpoptData> orig_ip_data =
      static_cast<IpoptData*> (&resto_ip_nlp->OrigIpData());
    SmartPtr<IpoptCalculatedQuantities> orig_ip_cq =
      static_cast<IpoptCalculatedQuantities*> (&resto_ip_nlp->OrigIpCq());

    // Set the value of the barrier parameter
    Number resto_mu;
    resto_mu = Max(orig_ip_data->curr_mu(),
                   orig_ip_cq->curr_c()->Amax(),
                   orig_ip_cq->curr_d_minus_s()->Amax());
    IpData().Set_mu(resto_mu);
    Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                   "Initial barrier parameter resto_mu = %e\n", resto_mu);

    /////////////////////////////////////////////////////////////////////
    //                   Initialize primal variables                   //
    /////////////////////////////////////////////////////////////////////

    // initialize the data structures in the restoration phase NLP
    IpData().InitializeDataStructures(IpNLP(), false, false, false,
                                      false, false);

    SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
    SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*> (GetRawPtr(new_x));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_x)));

    // Set the trial x variables from the original NLP
    Cnew_x->GetCompNonConst(0)->Copy(*orig_ip_data->curr()->x());

    // Compute the initial values for the n and p variables for the
    // equality constraints
    Number rho = resto_ip_nlp->Rho();
    DBG_PRINT((1,"rho = %e\n", rho));
    SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
    SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
    SmartPtr<const Vector> cvec = orig_ip_cq->curr_c();
    DBG_PRINT_VECTOR(2, "cvec", *cvec);
    SmartPtr<Vector> a = nc->MakeNew();
    SmartPtr<Vector> b = nc->MakeNew();
    a->Set(resto_mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(resto_mu/(2.*rho));
    DBG_PRINT_VECTOR(2, "a", *a);
    DBG_PRINT_VECTOR(2, "b", *b);
    solve_quadratic(*a, *b, *nc);
    pc->Copy(*cvec);
    pc->Axpy(1., *nc);
    DBG_PRINT_VECTOR(2, "nc", *nc);
    DBG_PRINT_VECTOR(2, "pc", *pc);

    // initial values for the n and p variables for the inequality
    // constraints
    SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
    SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
    cvec = orig_ip_cq->curr_d_minus_s();
    a = nd->MakeNew();
    b = nd->MakeNew();
    a->Set(resto_mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(resto_mu/(2.*rho));
    solve_quadratic(*a, *b, *nd);
    pd->Copy(*cvec);
    pd->Axpy(1., *nd);
    DBG_PRINT_VECTOR(2, "nd", *nd);
    DBG_PRINT_VECTOR(2, "pd", *pd);

    // Leave the slacks unchanged
    SmartPtr<Vector> new_s = IpData().curr()->s()->MakeNew();
    SmartPtr<CompoundVector> Cnew_s =
      static_cast<CompoundVector*> (GetRawPtr(new_s));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_s)));
    DBG_ASSERT(Cnew_s->NComps() == 1);

    // Set the trial s variables from the original NLP
    Cnew_s->GetCompNonConst(0)->Copy(*orig_ip_data->curr()->s());

    // Now set the primal trial variables
    DBG_PRINT_VECTOR(2,"new_s",*new_s);
    DBG_PRINT_VECTOR(2,"new_x",*new_x);
    SmartPtr<IteratesVector> trial = IpData().curr()->MakeNewContainer();
    trial->Set_primal(*new_x, *new_s);
    IpData().set_trial(trial);

    DBG_PRINT_VECTOR(2, "resto_c", *IpCq().trial_c());
    DBG_PRINT_VECTOR(2, "resto_d_minus_s", *IpCq().trial_d_minus_s());

    /////////////////////////////////////////////////////////////////////
    //                   Initialize bound multipliers                  //
    /////////////////////////////////////////////////////////////////////

    SmartPtr<Vector> new_z_L = IpData().curr()->z_L()->MakeNew();
    SmartPtr<CompoundVector> Cnew_z_L =
      static_cast<CompoundVector*> (GetRawPtr(new_z_L));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_z_L)));
    SmartPtr<Vector> new_z_U = IpData().curr()->z_U()->MakeNew();
    SmartPtr<CompoundVector> Cnew_z_U =
      static_cast<CompoundVector*> (GetRawPtr(new_z_U));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_z_U)));
    DBG_ASSERT(Cnew_z_U->NComps() == 1);
    SmartPtr<Vector> new_v_L = IpData().curr()->v_L()->MakeNew();
    SmartPtr<CompoundVector> Cnew_v_L =
      static_cast<CompoundVector*> (GetRawPtr(new_v_L));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_v_L)));
    DBG_ASSERT(Cnew_v_L->NComps() == 1);
    SmartPtr<Vector> new_v_U = IpData().curr()->v_U()->MakeNew();
    SmartPtr<CompoundVector> Cnew_v_U =
      static_cast<CompoundVector*> (GetRawPtr(new_v_U));
    DBG_ASSERT(dynamic_cast<CompoundVector*> (GetRawPtr(new_v_U)));
    DBG_ASSERT(Cnew_v_U->NComps() == 1);

    // Get the multipliers for the original bounds
    SmartPtr<const Vector> orig_z_L = orig_ip_data->curr()->z_L();
    SmartPtr<const Vector> orig_z_U = orig_ip_data->curr()->z_U();
    SmartPtr<const Vector> orig_v_L = orig_ip_data->curr()->v_L();
    SmartPtr<const Vector> orig_v_U = orig_ip_data->curr()->v_U();

    // Set the new multipliers to the min of the penalty parameter Rho
    // and their current value
    SmartPtr<Vector> Cnew_z_L0 = Cnew_z_L->GetCompNonConst(0);
    Cnew_z_L0->Set(rho);
    Cnew_z_L0->ElementWiseMin(*orig_z_L);
    SmartPtr<Vector> Cnew_z_U0 = Cnew_z_U->GetCompNonConst(0);
    Cnew_z_U0->Set(rho);
    Cnew_z_U0->ElementWiseMin(*orig_z_U);
    SmartPtr<Vector> Cnew_v_L0 = Cnew_v_L->GetCompNonConst(0);
    Cnew_v_L0->Set(rho);
    Cnew_v_L0->ElementWiseMin(*orig_v_L);
    SmartPtr<Vector> Cnew_v_U0 = Cnew_v_U->GetCompNonConst(0);
    Cnew_v_U0->Set(rho);
    Cnew_v_U0->ElementWiseMin(*orig_v_U);

    // Set the multipliers for the p and n bounds to the "primal" multipliers
    SmartPtr<Vector> Cnew_z_L1 = Cnew_z_L->GetCompNonConst(1);
    Cnew_z_L1->Set(resto_mu);
    Cnew_z_L1->ElementWiseDivide(*nc);
    SmartPtr<Vector> Cnew_z_L2 = Cnew_z_L->GetCompNonConst(2);
    Cnew_z_L2->Set(resto_mu);
    Cnew_z_L2->ElementWiseDivide(*pc);
    SmartPtr<Vector> Cnew_z_L3 = Cnew_z_L->GetCompNonConst(3);
    Cnew_z_L3->Set(resto_mu);
    Cnew_z_L3->ElementWiseDivide(*nd);
    SmartPtr<Vector> Cnew_z_L4 = Cnew_z_L->GetCompNonConst(4);
    Cnew_z_L4->Set(resto_mu);
    Cnew_z_L4->ElementWiseDivide(*pd);

    // Set those initial values to be the trial values in Data
    trial = IpData().trial()->MakeNewContainer();
    trial->Set_bound_mult(*new_z_L, *new_z_U, *new_v_L, *new_v_U);
    IpData().set_trial(trial);

    /////////////////////////////////////////////////////////////////////
    //           Initialize equality constraint multipliers            //
    /////////////////////////////////////////////////////////////////////

    DefaultIterateInitializer::least_square_mults(
      Jnlst(), IpNLP(), IpData(), IpCq(),
      resto_eq_mult_calculator_, constr_mult_init_max_);

    // upgrade the trial to the current point
    IpData().AcceptTrialPoint();

    DBG_PRINT_VECTOR(2, "y_c", *IpData().curr()->y_c());
    DBG_PRINT_VECTOR(2, "y_d", *IpData().curr()->y_d());

    DBG_PRINT_VECTOR(2, "z_L", *IpData().curr()->z_L());
    DBG_PRINT_VECTOR(2, "z_U", *IpData().curr()->z_U());
    DBG_PRINT_VECTOR(2, "v_L", *IpData().curr()->v_L());
    DBG_PRINT_VECTOR(2, "v_U", *IpData().curr()->v_U());

    return true;
  }
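
For reference, the a and b vectors assembled above feed solve_quadratic, which, assuming it returns the positive root v = a + sqrt(a^2 + b) as in Ipopt's restoration-phase initializer, gives the closed-form initialization n = mu/(2*rho) - c/2 + sqrt((mu/(2*rho) - c/2)^2 + mu*c/(2*rho)) and p = c + n. A minimal scalar sketch with hypothetical values:

#include <cmath>
#include <cstdio>

// Scalar sketch of the n/p initialization above, assuming solve_quadratic(a, b, v)
// computes the positive root v = a + sqrt(a*a + b).  The values of mu, rho and c
// are made-up samples.
int main()
{
   double mu  = 1e-1;  // restoration barrier parameter (resto_mu)
   double rho = 1e3;   // penalty parameter Rho()
   double c   = 2.5;   // one component of the constraint violation c(x)

   double a = mu / (2. * rho) - 0.5 * c;   // a->Set(...); a->Axpy(-0.5, *cvec);
   double b = mu / (2. * rho) * c;         // b->Copy(*cvec); b->Scal(...);
   double n = a + std::sqrt(a * a + b);    // solve_quadratic(*a, *b, *nc);
   double p = c + n;                       // pc->Copy(*cvec); pc->Axpy(1., *nc);

   // n and p are positive and satisfy p - n = c by construction.
   std::printf("n = %g, p = %g, p - n = %g (c = %g)\n", n, p, p - n, c);
   return 0;
}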
Code Example #6
  void GradientScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    const Matrix& Px_L, const Vector& x_L,
    const Matrix& Px_U, const Vector& x_U,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x = x_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
    }

    //
    // Calculate grad_f scaling
    //
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    if (nlp_->Eval_grad_f(*x, *grad_f)) {
      double max_grad_f = grad_f->Amax();
      df = 1.;
      if (scaling_obj_target_gradient_ == 0.) {
        if (max_grad_f > scaling_max_gradient_) {
          df = scaling_max_gradient_ / max_grad_f;
        }
      }
      else {
        if (max_grad_f == 0.) {
          Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                         "Gradient of objective function is zero at starting point.  Cannot determine scaling factor based on scaling_obj_target_gradient option.\n");
        }
        else {
          df = scaling_obj_target_gradient_ / max_grad_f;
        }
      }
      df = Max(df, scaling_min_value_);
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
      df = 1.;
    }
    //
    // No x scaling
    //
    dx = NULL;

    dc = NULL;
    if (c_space->Dim()>0) {
      //
      // Calculate c scaling
      //
      SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
      if (nlp_->Eval_jac_c(*x, *jac_c)) {
        dc = c_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dc->Set(dbl_min);
        jac_c->ComputeRowAMax(*dc, false);
        Number arow_max = dc->Amax();
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dc->ElementWiseReciprocal();
            dc->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dc->MakeNew();
            dummy->Set(1.);
            dc->ElementWiseMin(*dummy);
          }
          else {
            dc = NULL;
          }
        }
        else {
          dc->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dc) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dc->MakeNew();
          tmp->Set(scaling_min_value_);
          dc->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of equality constraints at user provided starting point.\n  No scaling factors for equality constraints computed!\n");
      }
    }

    dd = NULL;
    if (d_space->Dim()>0) {
      //
      // Calculate d scaling
      //
      SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
      if (nlp_->Eval_jac_d(*x, *jac_d)) {
        dd = d_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dd->Set(dbl_min);
        jac_d->ComputeRowAMax(*dd, false);
        Number arow_max = dd->Amax();
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dd->ElementWiseReciprocal();
            dd->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dd->MakeNew();
            dummy->Set(1.);
            dd->ElementWiseMin(*dummy);
          }
          else {
            dd = NULL;
          }
        }
        else {
          dd->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dd) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dd->MakeNew();
          tmp->Set(scaling_min_value_);
          dd->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of inequality constraints at user provided starting point.\n  No scaling factors for inequality constraints computed!\n");
      }
    }
  }
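
The branches above reduce to a simple per-row rule when no target gradient is set: shrink a row so that its largest entry does not exceed scaling_max_gradient_, never scale up, and never go below scaling_min_value_. A standalone scalar sketch of that rule (sample values are hypothetical):

#include <algorithm>
#include <cstdio>

// Scalar sketch of the gradient-based scaling rule above: shrink a row so its
// largest absolute entry does not exceed scaling_max_gradient, never scale up,
// and never go below scaling_min_value.  Sample values are made up.
double gradient_scale_factor(double amax, double scaling_max_gradient,
                             double scaling_min_value)
{
   double d = 1.0;
   if (amax > scaling_max_gradient)
      d = scaling_max_gradient / amax;    // bring the largest entry down to the cap
   return std::max(d, scaling_min_value); // but respect the lower bound on the factor
}

int main()
{
   std::printf("%g\n", gradient_scale_factor(1e6, 100.0, 1e-8)); // prints 0.0001
   std::printf("%g\n", gradient_scale_factor(5.0, 100.0, 1e-8)); // prints 1
   return 0;
}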
Code Example #7
bool RestoIpoptNLP::InitializeStructures(
   SmartPtr<Vector>& x,
   bool              init_x,
   SmartPtr<Vector>& y_c,
   bool              init_y_c,
   SmartPtr<Vector>& y_d,
   bool              init_y_d,
   SmartPtr<Vector>& z_L,
   bool              init_z_L,
   SmartPtr<Vector>& z_U,
   bool              init_z_U,
   SmartPtr<Vector>& v_L,
   SmartPtr<Vector>& v_U)
{
   DBG_START_METH("RestoIpoptNLP::InitializeStructures", 0); DBG_ASSERT(initialized_);
   ///////////////////////////////////////////////////////////
   // Get the vector/matrix spaces for the original problem //
   ///////////////////////////////////////////////////////////

   SmartPtr<const VectorSpace> orig_x_space;
   SmartPtr<const VectorSpace> orig_c_space;
   SmartPtr<const VectorSpace> orig_d_space;
   SmartPtr<const VectorSpace> orig_x_l_space;
   SmartPtr<const MatrixSpace> orig_px_l_space;
   SmartPtr<const VectorSpace> orig_x_u_space;
   SmartPtr<const MatrixSpace> orig_px_u_space;
   SmartPtr<const VectorSpace> orig_d_l_space;
   SmartPtr<const MatrixSpace> orig_pd_l_space;
   SmartPtr<const VectorSpace> orig_d_u_space;
   SmartPtr<const MatrixSpace> orig_pd_u_space;
   SmartPtr<const MatrixSpace> orig_jac_c_space;
   SmartPtr<const MatrixSpace> orig_jac_d_space;
   SmartPtr<const SymMatrixSpace> orig_h_space;

   orig_ip_nlp_->GetSpaces(orig_x_space, orig_c_space, orig_d_space, orig_x_l_space, orig_px_l_space, orig_x_u_space,
      orig_px_u_space, orig_d_l_space, orig_pd_l_space, orig_d_u_space, orig_pd_u_space, orig_jac_c_space,
      orig_jac_d_space, orig_h_space);

   // Create the restoration phase problem vector/matrix spaces, based
   // on the original spaces (pretty inconvenient with all the
   // matrix spaces, isn't it?!?)
   DBG_PRINT((1, "Creating the x_space_\n"));
   // vector x
   Index total_dim = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   x_space_ = new CompoundVectorSpace(5, total_dim);
   x_space_->SetCompSpace(0, *orig_x_space);
   x_space_->SetCompSpace(1, *orig_c_space); // n_c
   x_space_->SetCompSpace(2, *orig_c_space); // p_c
   x_space_->SetCompSpace(3, *orig_d_space); // n_d
   x_space_->SetCompSpace(4, *orig_d_space); // p_d

   DBG_PRINT((1, "Setting the c_space_\n"));
   // vector c
   //c_space_ = orig_c_space;
   c_space_ = new CompoundVectorSpace(1, orig_c_space->Dim());
   c_space_->SetCompSpace(0, *orig_c_space);

   DBG_PRINT((1, "Setting the d_space_\n"));
   // vector d
   //d_space_ = orig_d_space;
   d_space_ = new CompoundVectorSpace(1, orig_d_space->Dim());
   d_space_->SetCompSpace(0, *orig_d_space);

   DBG_PRINT((1, "Creating the x_l_space_\n"));
   // vector x_L
   total_dim = orig_x_l_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   x_l_space_ = new CompoundVectorSpace(5, total_dim);
   x_l_space_->SetCompSpace(0, *orig_x_l_space);
   x_l_space_->SetCompSpace(1, *orig_c_space); // n_c >=0
   x_l_space_->SetCompSpace(2, *orig_c_space); // p_c >=0
   x_l_space_->SetCompSpace(3, *orig_d_space); // n_d >=0
   x_l_space_->SetCompSpace(4, *orig_d_space); // p_d >=0

   DBG_PRINT((1, "Setting the x_u_space_\n"));
   // vector x_U
   x_u_space_ = new CompoundVectorSpace(1, orig_x_u_space->Dim());
   x_u_space_->SetCompSpace(0, *orig_x_u_space);

   DBG_PRINT((1, "Creating the px_l_space_\n"));
   // matrix px_l
   Index total_rows = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   Index total_cols = orig_x_l_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   px_l_space_ = new CompoundMatrixSpace(5, 5, total_rows, total_cols);
   px_l_space_->SetBlockRows(0, orig_x_space->Dim());
   px_l_space_->SetBlockRows(1, orig_c_space->Dim());
   px_l_space_->SetBlockRows(2, orig_c_space->Dim());
   px_l_space_->SetBlockRows(3, orig_d_space->Dim());
   px_l_space_->SetBlockRows(4, orig_d_space->Dim());
   px_l_space_->SetBlockCols(0, orig_x_l_space->Dim());
   px_l_space_->SetBlockCols(1, orig_c_space->Dim());
   px_l_space_->SetBlockCols(2, orig_c_space->Dim());
   px_l_space_->SetBlockCols(3, orig_d_space->Dim());
   px_l_space_->SetBlockCols(4, orig_d_space->Dim());

   px_l_space_->SetCompSpace(0, 0, *orig_px_l_space);
   // now setup the identity matrix
   // This could be changed to be something like...
   // px_l_space_->SetBlockToIdentity(1,1,1.0);
   // px_l_space_->SetBlockToIdentity(2,2,other_factor);
   // ... etc with some simple changes to the CompoundMatrixSpace
   // to allow this (space should auto create the matrices)
   //
   // for now, we use the new feature and set the true flag for this block
   // to say that the matrices should be auto_allocated
   SmartPtr<const MatrixSpace> identity_mat_space_nc = new IdentityMatrixSpace(orig_c_space->Dim());
   px_l_space_->SetCompSpace(1, 1, *identity_mat_space_nc, true);
   px_l_space_->SetCompSpace(2, 2, *identity_mat_space_nc, true);
   SmartPtr<const MatrixSpace> identity_mat_space_nd = new IdentityMatrixSpace(orig_d_space->Dim());
   px_l_space_->SetCompSpace(3, 3, *identity_mat_space_nd, true);
   px_l_space_->SetCompSpace(4, 4, *identity_mat_space_nd, true);

   DBG_PRINT((1, "Creating the px_u_space_\n"));
   // matrix px_u
   total_rows = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   total_cols = orig_x_u_space->Dim();
   DBG_PRINT((1, "total_rows = %d, total_cols = %d\n", total_rows, total_cols));
   px_u_space_ = new CompoundMatrixSpace(5, 1, total_rows, total_cols);
   px_u_space_->SetBlockRows(0, orig_x_space->Dim());
   px_u_space_->SetBlockRows(1, orig_c_space->Dim());
   px_u_space_->SetBlockRows(2, orig_c_space->Dim());
   px_u_space_->SetBlockRows(3, orig_d_space->Dim());
   px_u_space_->SetBlockRows(4, orig_d_space->Dim());
   px_u_space_->SetBlockCols(0, orig_x_u_space->Dim());

   px_u_space_->SetCompSpace(0, 0, *orig_px_u_space);
   // other matrices are zero'ed out

   // vector d_L
   //d_l_space_ = orig_d_l_space;
   d_l_space_ = new CompoundVectorSpace(1, orig_d_l_space->Dim());
   d_l_space_->SetCompSpace(0, *orig_d_l_space);

   // vector d_U
   //d_u_space_ = orig_d_u_space;
   d_u_space_ = new CompoundVectorSpace(1, orig_d_u_space->Dim());
   d_u_space_->SetCompSpace(0, *orig_d_u_space);

   // matrix pd_L
   //pd_l_space_ = orig_pd_l_space;
   pd_l_space_ = new CompoundMatrixSpace(1, 1, orig_pd_l_space->NRows(), orig_pd_l_space->NCols());
   pd_l_space_->SetBlockRows(0, orig_pd_l_space->NRows());
   pd_l_space_->SetBlockCols(0, orig_pd_l_space->NCols());
   pd_l_space_->SetCompSpace(0, 0, *orig_pd_l_space);

   // matrix pd_U
   //pd_u_space_ = orig_pd_u_space;
   pd_u_space_ = new CompoundMatrixSpace(1, 1, orig_pd_u_space->NRows(), orig_pd_u_space->NCols());
   pd_u_space_->SetBlockRows(0, orig_pd_u_space->NRows());
   pd_u_space_->SetBlockCols(0, orig_pd_u_space->NCols());
   pd_u_space_->SetCompSpace(0, 0, *orig_pd_u_space);

   DBG_PRINT((1, "Creating the jac_c_space_\n"));
   // matrix jac_c
   total_rows = orig_c_space->Dim();
   total_cols = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   jac_c_space_ = new CompoundMatrixSpace(1, 5, total_rows, total_cols);
   jac_c_space_->SetBlockRows(0, orig_c_space->Dim());
   jac_c_space_->SetBlockCols(0, orig_x_space->Dim());
   jac_c_space_->SetBlockCols(1, orig_c_space->Dim());
   jac_c_space_->SetBlockCols(2, orig_c_space->Dim());
   jac_c_space_->SetBlockCols(3, orig_d_space->Dim());
   jac_c_space_->SetBlockCols(4, orig_d_space->Dim());

   jac_c_space_->SetCompSpace(0, 0, *orig_jac_c_space);
   // **NOTE: By placing "flat" identity matrices here, we are creating
   //         potential issues for linalg operations that arise when the original
   //         NLP has a "compound" c_space. To avoid problems like this,
   //         we place all unmodified component spaces in trivial (size 1)
   //         "compound" spaces.
   jac_c_space_->SetCompSpace(0, 1, *identity_mat_space_nc, true);
   jac_c_space_->SetCompSpace(0, 2, *identity_mat_space_nc, true);
   // remaining blocks are zero'ed

   DBG_PRINT((1, "Creating the jac_d_space_\n"));
   // matrix jac_d
   total_rows = orig_d_space->Dim();
   total_cols = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   jac_d_space_ = new CompoundMatrixSpace(1, 5, total_rows, total_cols);
   jac_d_space_->SetBlockRows(0, orig_d_space->Dim());
   jac_d_space_->SetBlockCols(0, orig_x_space->Dim());
   jac_d_space_->SetBlockCols(1, orig_c_space->Dim());
   jac_d_space_->SetBlockCols(2, orig_c_space->Dim());
   jac_d_space_->SetBlockCols(3, orig_d_space->Dim());
   jac_d_space_->SetBlockCols(4, orig_d_space->Dim());

   jac_d_space_->SetCompSpace(0, 0, *orig_jac_d_space);
   DBG_PRINT((1, "orig_jac_d_space = %x\n", GetRawPtr(orig_jac_d_space)))
   // Blocks (0,1) and (0,2) are zero'ed out
   // **NOTE: By placing "flat" identity matrices here, we are creating
   //         potential issues for linalg operations that arise when the original
   //         NLP has a "compound" d_space. To avoid problems like this,
   //         we place all unmodified component spaces in trivial (size 1)
   //         "compound" spaces.
   jac_d_space_->SetCompSpace(0, 3, *identity_mat_space_nd, true);
   jac_d_space_->SetCompSpace(0, 4, *identity_mat_space_nd, true);

   DBG_PRINT((1, "Creating the h_space_\n"));
   // matrix h
   total_dim = orig_x_space->Dim() + 2 * orig_c_space->Dim() + 2 * orig_d_space->Dim();
   h_space_ = new CompoundSymMatrixSpace(5, total_dim);
   h_space_->SetBlockDim(0, orig_x_space->Dim());
   h_space_->SetBlockDim(1, orig_c_space->Dim());
   h_space_->SetBlockDim(2, orig_c_space->Dim());
   h_space_->SetBlockDim(3, orig_d_space->Dim());
   h_space_->SetBlockDim(4, orig_d_space->Dim());

   SmartPtr<DiagMatrixSpace> DR_x_space = new DiagMatrixSpace(orig_x_space->Dim());
   if( hessian_approximation_ == LIMITED_MEMORY )
   {
      const LowRankUpdateSymMatrixSpace* LR_h_space = static_cast<const LowRankUpdateSymMatrixSpace*>(GetRawPtr(
         orig_h_space));
      DBG_ASSERT(LR_h_space);
      SmartPtr<LowRankUpdateSymMatrixSpace> new_orig_h_space = new LowRankUpdateSymMatrixSpace(LR_h_space->Dim(),
         NULL, orig_x_space, false);
      h_space_->SetCompSpace(0, 0, *new_orig_h_space, true);
   }
   else
   {
      SmartPtr<SumSymMatrixSpace> sumsym_mat_space = new SumSymMatrixSpace(orig_x_space->Dim(), 2);
      sumsym_mat_space->SetTermSpace(0, *orig_h_space);
      sumsym_mat_space->SetTermSpace(1, *DR_x_space);
      h_space_->SetCompSpace(0, 0, *sumsym_mat_space, true);
      // All remaining blocks are zero'ed out
   }

   ///////////////////////////
   // Create the bound data //
   ///////////////////////////

   // x_L
   x_L_ = x_l_space_->MakeNewCompoundVector();
   x_L_->SetComp(0, *orig_ip_nlp_->x_L()); // x >= x_L
   x_L_->GetCompNonConst(1)->Set(0.0); // n_c >= 0
   x_L_->GetCompNonConst(2)->Set(0.0); // p_c >= 0
   x_L_->GetCompNonConst(3)->Set(0.0); // n_d >= 0
   x_L_->GetCompNonConst(4)->Set(0.0); // p_d >= 0
   DBG_PRINT_VECTOR(2, "resto_x_L", *x_L_);

   // x_U
   x_U_ = x_u_space_->MakeNewCompoundVector();
   x_U_->SetComp(0, *orig_ip_nlp_->x_U());

   // d_L
   d_L_ = d_l_space_->MakeNewCompoundVector();
   d_L_->SetComp(0, *orig_ip_nlp_->d_L());

   // d_U
   d_U_ = d_u_space_->MakeNewCompoundVector();
   d_U_->SetComp(0, *orig_ip_nlp_->d_U());

   // Px_L
   Px_L_ = px_l_space_->MakeNewCompoundMatrix();
   Px_L_->SetComp(0, 0, *orig_ip_nlp_->Px_L());
   // Identities are auto-created (true flag passed into SetCompSpace)

   // Px_U
   Px_U_ = px_u_space_->MakeNewCompoundMatrix();
   Px_U_->SetComp(0, 0, *orig_ip_nlp_->Px_U());
   // Remaining matrices will be zero'ed out

   // Pd_L
   //Pd_L_ = orig_ip_nlp_->Pd_L();
   Pd_L_ = pd_l_space_->MakeNewCompoundMatrix();
   Pd_L_->SetComp(0, 0, *orig_ip_nlp_->Pd_L());

   // Pd_U
   //Pd_U_ = orig_ip_nlp_->Pd_U();
   Pd_U_ = pd_u_space_->MakeNewCompoundMatrix();
   Pd_U_->SetComp(0, 0, *orig_ip_nlp_->Pd_U());

   // Getting the NLP scaling

   SmartPtr<const MatrixSpace> scaled_jac_c_space;
   SmartPtr<const MatrixSpace> scaled_jac_d_space;
   SmartPtr<const SymMatrixSpace> scaled_h_space;
   NLP_scaling()->DetermineScaling(GetRawPtr(x_space_), c_space_, d_space_, GetRawPtr(jac_c_space_),
      GetRawPtr(jac_d_space_), GetRawPtr(h_space_), scaled_jac_c_space, scaled_jac_d_space, scaled_h_space, *Px_L_,
      *x_L_, *Px_U_, *x_U_);
   // For now we assume that no scaling is done inside the NLP_Scaling
   DBG_ASSERT(scaled_jac_c_space == jac_c_space_);
   DBG_ASSERT(scaled_jac_d_space == jac_d_space_);
   DBG_ASSERT(scaled_h_space == h_space_);

   /////////////////////////////////////////////////////////////////////////
   // Create and initialize the vectors for the restoration phase problem //
   /////////////////////////////////////////////////////////////////////////

   // Vector x
   SmartPtr<CompoundVector> comp_x = x_space_->MakeNewCompoundVector();
   if( init_x )
   {
      comp_x->GetCompNonConst(0)->Copy(*orig_ip_data_->curr()->x());
      comp_x->GetCompNonConst(1)->Set(1.0);
      comp_x->GetCompNonConst(2)->Set(1.0);
      comp_x->GetCompNonConst(3)->Set(1.0);
      comp_x->GetCompNonConst(4)->Set(1.0);
   }
   x = GetRawPtr(comp_x);

   // Vector y_c
   y_c = c_space_->MakeNew();
   if( init_y_c )
   {
      y_c->Set(0.0);  // ToDo
   }

   // Vector y_d
   y_d = d_space_->MakeNew();
   if( init_y_d )
   {
      y_d->Set(0.0);
   }

   // Vector z_L
   z_L = x_l_space_->MakeNew();
   if( init_z_L )
   {
      z_L->Set(1.0);
   }

   // Vector z_U
   z_U = x_u_space_->MakeNew();
   if( init_z_U )
   {
      z_U->Set(1.0);
   }

   // Vector v_L
   v_L = d_l_space_->MakeNew();

   // Vector v_U
   v_U = d_u_space_->MakeNew();

   // Initialize other data needed by the restoration NLP.  x_ref is
   // the reference point on which the regularization term is based.
   x_ref_ = orig_x_space->MakeNew();
   x_ref_->Copy(*orig_ip_data_->curr()->x());

   dr_x_ = orig_x_space->MakeNew();
   dr_x_->Set(1.0);
   SmartPtr<Vector> tmp = dr_x_->MakeNew();
   tmp->Copy(*x_ref_);
   dr_x_->ElementWiseMax(*tmp);
   tmp->Scal(-1.);
   dr_x_->ElementWiseMax(*tmp);
   dr_x_->ElementWiseReciprocal();
   DBG_PRINT_VECTOR(2, "dr_x_", *dr_x_);
   DR_x_ = DR_x_space->MakeNewDiagMatrix();
   DR_x_->SetDiag(*dr_x_);

   return true;
}
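
All of the compound spaces built above follow the same bookkeeping: the restoration-phase variables stack the original x with the slack pairs (n_c, p_c, n_d, p_d), so every total_dim/total_cols equals n_x + 2*m_c + 2*m_d. A small standalone sketch of that layout (plain arrays stand in for Ipopt's compound spaces; the dimensions are hypothetical):

#include <cstdio>

// Sketch of the restoration-phase variable layout used above:
// x_resto = [ x, n_c, p_c, n_d, p_d ], hence total_dim = n_x + 2*m_c + 2*m_d.
// The dimensions below are made-up sample values.
int main()
{
   const int n_x = 7;   // Dim() of orig_x_space
   const int m_c = 3;   // Dim() of orig_c_space (equality constraints)
   const int m_d = 2;   // Dim() of orig_d_space (inequality constraints)

   const int   comp_dims[5]  = { n_x, m_c, m_c, m_d, m_d };
   const char* comp_names[5] = { "x", "n_c", "p_c", "n_d", "p_d" };

   int total_dim = 0;
   for (int i = 0; i < 5; i++)
   {
      std::printf("component %d (%-3s): offset %2d, dim %d\n",
                  i, comp_names[i], total_dim, comp_dims[i]);
      total_dim += comp_dims[i];
   }
   std::printf("total_dim = %d (= n_x + 2*m_c + 2*m_d = %d)\n",
               total_dim, n_x + 2 * m_c + 2 * m_d);
   return 0;
}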
Code Example #8
  void GradientScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x = x_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
    }

    //
    // Calculate grad_f scaling
    //
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    if (nlp_->Eval_grad_f(*x, *grad_f)) {
      Number max_grad_f = grad_f->Amax();
      df = 1;
      if (max_grad_f > scaling_max_gradient_) {
        df = scaling_max_gradient_ / max_grad_f;
      }
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
      df = 1;
    }
    //
    // No x scaling
    //
    dx = NULL;

    //
    // Calculate c scaling
    //
    SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
    if (nlp_->Eval_jac_c(*x, *jac_c)) {
      // ToDo: Don't use TripletHelper, have special methods on matrices instead
      Index nnz = TripletHelper::GetNumberEntries(*jac_c);
      Index* irow = new Index[nnz];
      Index* jcol = new Index[nnz];
      Number* values = new Number[nnz];
      TripletHelper::FillRowCol(nnz, *jac_c, irow, jcol);
      TripletHelper::FillValues(nnz, *jac_c, values);
      Number* c_scaling = new Number[jac_c->NRows()];

      for (Index r=0; r<jac_c->NRows(); r++) {
        c_scaling[r] = 0;
      }

      // put the max of each row into c_scaling...
      bool need_c_scale = false;
      for (Index i=0; i<nnz; i++) {
        if (fabs(values[i]) > scaling_max_gradient_) {
          c_scaling[irow[i]-1] = Max(c_scaling[irow[i]-1], fabs(values[i]));
          need_c_scale = true;
        }
      }

      if (need_c_scale) {
        // now compute the scaling factors for each row
        for (Index r=0; r<jac_c->NRows(); r++) {
          Number scaling = 1.0;
          if (c_scaling[r] > scaling_max_gradient_) {
            scaling = scaling_max_gradient_/c_scaling[r];
          }
          c_scaling[r] = scaling;
        }

        dc = c_space->MakeNew();
        TripletHelper::PutValuesInVector(jac_c->NRows(), c_scaling, *dc);
        if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
          Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                         "Equality constraints are scaled with smallest scaling parameter is %e\n", dc->Min());
        }
      }
      else {
        Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                       "Equality constraints are not scaled.\n");
        dc = NULL;
      }

      delete [] irow;
      irow = NULL;
      delete [] jcol;
      jcol = NULL;
      delete [] values;
      values = NULL;
      delete [] c_scaling;
      c_scaling = NULL;
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating Jacobian of equality constraints at user provided starting point.\n  No scaling factors for equality constraints computed!\n");
      dc = NULL;
    }

    //
    // Calculate d scaling
    //
    SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
    if (nlp_->Eval_jac_d(*x, *jac_d)) {
      Index nnz = TripletHelper::GetNumberEntries(*jac_d);
      Index* irow = new Index[nnz];
      Index* jcol = new Index[nnz];
      Number* values = new Number[nnz];
      TripletHelper::FillRowCol(nnz, *jac_d, irow, jcol);
      TripletHelper::FillValues(nnz, *jac_d, values);
      Number* d_scaling = new Number[jac_d->NRows()];

      for (Index r=0; r<jac_d->NRows(); r++) {
        d_scaling[r] = 0;
      }

      // put the max of each row into d_scaling...
      bool need_d_scale = false;
      for (Index i=0; i<nnz; i++) {
        if (fabs(values[i]) > scaling_max_gradient_) {
          d_scaling[irow[i]-1] = Max(d_scaling[irow[i]-1], fabs(values[i]));
          need_d_scale = true;
        }
      }

      if (need_d_scale) {
        // now compute the scaling factors for each row
        for (Index r=0; r<jac_d->NRows(); r++) {
          Number scaling = 1.0;
          if (d_scaling[r] > scaling_max_gradient_) {
            scaling = scaling_max_gradient_/d_scaling[r];
          }
          d_scaling[r] = scaling;
        }

        dd = d_space->MakeNew();
        TripletHelper::PutValuesInVector(jac_d->NRows(), d_scaling, *dd);
        if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
          Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                         "Inequality constraints are scaled with smallest scaling parameter is %e\n", dd->Min());
        }
      }
      else {
        dd = NULL;
        Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                       "Inequality constraints are not scaled.\n");
      }

      delete [] irow;
      irow = NULL;
      delete [] jcol;
      jcol = NULL;
      delete [] values;
      values = NULL;
      delete [] d_scaling;
      d_scaling = NULL;
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating Jacobian of inequality constraints at user provided starting point.\n  No scaling factors for inequality constraints computed!\n");
      dd = NULL;
    }
  }
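
This older variant pulls the Jacobian out in triplet form and takes the per-row maximum by hand (note the 1-based row indices in irow). A simplified standalone sketch of the same row-max pass over triplet data, using a made-up 3x4 matrix:

#include <cmath>
#include <cstdio>

// Simplified sketch of the triplet row-max pass above: for each 1-based row
// index irow[i], keep the largest |value| in that row, then turn rows whose
// maximum exceeds scaling_max_gradient into scaling factors.  The triplet
// data describes a made-up 3x4 matrix.
int main()
{
   const int    nrows = 3, nnz = 5;
   const int    irow[nnz]   = { 1, 1, 2, 3, 3 };  // 1-based, as filled by TripletHelper
   const double values[nnz] = { 2.0, -500.0, 0.5, 40.0, -3.0 };
   const double scaling_max_gradient = 100.0;

   double row_max[nrows] = { 0.0, 0.0, 0.0 };
   for (int i = 0; i < nnz; i++)
      row_max[irow[i] - 1] = std::fmax(row_max[irow[i] - 1], std::fabs(values[i]));

   for (int r = 0; r < nrows; r++)
   {
      double scaling = 1.0;
      if (row_max[r] > scaling_max_gradient)
         scaling = scaling_max_gradient / row_max[r];
      std::printf("row %d: max |a| = %g, scaling = %g\n", r + 1, row_max[r], scaling);
   }
   return 0;
}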
Code Example #9
File: IpIpoptAlg.cpp  Project: MengbinZhu/pfldp
  Number IpoptAlgorithm::correct_bound_multiplier(
    const Vector& trial_z,
    const Vector& trial_slack,
    const Vector& trial_compl,
    SmartPtr<const Vector>& new_trial_z)
  {
    DBG_START_METH("IpoptAlgorithm::CorrectBoundMultiplier",
                   dbg_verbosity);

    if (kappa_sigma_<1. || trial_z.Dim()==0) {
      new_trial_z = &trial_z;
      return 0.;
    }

    // We choose as barrier parameter to be used either the current
    // algorithmic barrier parameter (if we are not in the free mode),
    // or the average complementarity (at the trial point)
    Number mu;
    if (IpData().FreeMuMode()) {
      mu = IpCq().trial_avrg_compl();
      mu = Min(mu, 1e3);
    }
    else {
      mu = IpData().curr_mu();
    }
    DBG_PRINT((1,"mu = %8.2e\n", mu));
    DBG_PRINT_VECTOR(2, "trial_z", trial_z);

    // First check quickly if anything needs to be corrected, using the
    // trial complementarity directly.  Here, Amax is the same as Max
    // (and we use Amax because that can be used later)
    if (trial_compl.Amax() <= kappa_sigma_*mu &&
        trial_compl.Min() >= 1./kappa_sigma_*mu) {
      new_trial_z = &trial_z;
      return 0.;
    }

    SmartPtr<Vector> one_over_s = trial_z.MakeNew();
    one_over_s->Copy(trial_slack);
    one_over_s->ElementWiseReciprocal();

    SmartPtr<Vector> step_z = trial_z.MakeNew();
    step_z->AddTwoVectors(kappa_sigma_*mu, *one_over_s, -1., trial_z, 0.);

    DBG_PRINT_VECTOR(2, "step_z", *step_z);

    Number max_correction_up = Max(0., -step_z->Min());
    if (max_correction_up>0.) {
      SmartPtr<Vector> tmp = trial_z.MakeNew();
      tmp->Set(0.);
      step_z->ElementWiseMin(*tmp);
      tmp->AddTwoVectors(1., trial_z, 1., *step_z, 0.);
      new_trial_z = GetRawPtr(tmp);
    }
    else {
      new_trial_z = &trial_z;
    }

    step_z->AddTwoVectors(1./kappa_sigma_*mu, *one_over_s, -1., *new_trial_z, 0.);

    Number max_correction_low = Max(0., step_z->Max());
    if (max_correction_low>0.) {
      SmartPtr<Vector> tmp = trial_z.MakeNew();
      tmp->Set(0.);
      step_z->ElementWiseMax(*tmp);
      tmp->AddTwoVectors(1., *new_trial_z, 1., *step_z, 0.);
      new_trial_z = GetRawPtr(tmp);
    }

    DBG_PRINT_VECTOR(2, "new_trial_z", *new_trial_z);

    return Max(max_correction_up, max_correction_low);
  }
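
Element-wise, the two correction steps above clip each multiplier z_i so that the complementarity product s_i*z_i stays within [mu/kappa_sigma, kappa_sigma*mu]; the returned number is the largest change that was applied. A scalar sketch of that clipping (the sample values, including kappa_sigma = 1e10, are illustrative):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Scalar sketch of the kappa_sigma correction above: clip z so that the
// complementarity product s*z lies in [mu/kappa_sigma, kappa_sigma*mu].
// 'correction' receives the size of the change.  Sample values are made up.
double clip_bound_multiplier(double z, double s, double mu,
                             double kappa_sigma, double& correction)
{
   double upper = kappa_sigma * mu / s;    // cap from above
   double lower = mu / (kappa_sigma * s);  // push up from below
   double z_new = std::min(std::max(z, lower), upper);
   correction   = std::fabs(z_new - z);
   return z_new;
}

int main()
{
   double corr;
   double z1 = clip_bound_multiplier(1e12, 1e-3, 1e-2, 1e10, corr);
   std::printf("z too large -> %g (correction %g)\n", z1, corr);
   double z2 = clip_bound_multiplier(1e-15, 1e-3, 1e-2, 1e10, corr);
   std::printf("z too small -> %g (correction %g)\n", z2, corr);
   return 0;
}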
Code Example #10
  void BonminInterface::solve(void* mem) const {
    auto m = static_cast<BonminMemory*>(mem);

    // Check the provided inputs
    checkInputs(mem);

    // Reset statistics
    m->inf_pr.clear();
    m->inf_du.clear();
    m->mu.clear();
    m->d_norm.clear();
    m->regularization_size.clear();
    m->alpha_pr.clear();
    m->alpha_du.clear();
    m->obj.clear();
    m->ls_trials.clear();

    // Reset number of iterations
    m->n_iter = 0;

    // Statistics
    for (auto&& s : m->fstats) s.second.reset();

    // MINLP instance
    SmartPtr<BonminUserClass> tminlp = new BonminUserClass(*this, m);

    BonMinMessageHandler mh;

    // Start a BONMIN application
    BonminSetup bonmin(&mh);

    SmartPtr<OptionsList> options = new OptionsList();
    SmartPtr<Journalist> journalist = new Journalist();
    SmartPtr<Bonmin::RegisteredOptions> roptions = new Bonmin::RegisteredOptions();

    {
      // Direct output through casadi::userOut()
      StreamJournal* jrnl_raw = new StreamJournal("console", J_ITERSUMMARY);
      jrnl_raw->SetOutputStream(&casadi::userOut());
      jrnl_raw->SetPrintLevel(J_DBG, J_NONE);
      SmartPtr<Journal> jrnl = jrnl_raw;
      journalist->AddJournal(jrnl);
    }

    options->SetJournalist(journalist);
    options->SetRegisteredOptions(roptions);
    bonmin.setOptionsAndJournalist(roptions, options, journalist);
    bonmin.registerOptions();

    // Initialize
    bonmin.initialize(GetRawPtr(tminlp));

    m->fstats.at("mainloop").tic();

    if (true) {
      // Branch-and-bound
      Bab bb;
      bb(bonmin);
    }

    //m->return_status = return_status_string(status);
    m->fstats.at("mainloop").toc();

    // Save results to outputs
    casadi_copy(&m->fk, 1, m->f);
    casadi_copy(m->xk, nx_, m->x);
    casadi_copy(m->lam_gk, ng_, m->lam_g);
    casadi_copy(m->lam_xk, nx_, m->lam_x);
    casadi_copy(m->gk, ng_, m->g);

  }
Code Example #11
  void EquilibrationScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    const Matrix& Px_L, const Vector& x_L,
    const Matrix& Px_U, const Vector& x_U,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x0 = x_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x0), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in EquilibrationScaling.\n");
    }

    // We accumulate the absolute values of the Jacobian and
    // objective function gradient in an array of sufficient size

    SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
    SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    const Index nnz_jac_c = TripletHelper::GetNumberEntries(*jac_c);
    const Index nnz_jac_d = TripletHelper::GetNumberEntries(*jac_d);
    const Index nc = jac_c_space->NRows();
    const Index nd = jac_d_space->NRows();
    const Index nx = x_space->Dim();
    Number* avrg_values = new Number[nnz_jac_c+nnz_jac_d+nx];
    Number* val_buffer = new Number[Max(nnz_jac_c,nnz_jac_d,nx)];

    SmartPtr<PointPerturber> perturber =
      new PointPerturber(*x0, point_perturbation_radius_,
                         Px_L, x_L, Px_U, x_U);

    const Index num_evals = 4;
    const Index max_num_eval_errors = 10;
    Index num_eval_errors = 0;
    for (Index ieval=0; ieval<num_evals; ieval++) {
      // Compute obj gradient and Jacobian at random perturbation point
      bool done = false;
      while (!done) {
        SmartPtr<Vector> xpert = perturber->MakeNewPerturbedPoint();
        done = (nlp_->Eval_grad_f(*xpert, *grad_f) &&
                nlp_->Eval_jac_c(*xpert, *jac_c) &&
                nlp_->Eval_jac_d(*xpert, *jac_d));
        if (!done) {
          Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                         "Error evaluating first derivatives as at perturbed point for equilibration-based scaling.\n");
          num_eval_errors++;
        }
        if (num_eval_errors>max_num_eval_errors) {
          delete [] val_buffer;
          delete [] avrg_values;
          THROW_EXCEPTION(FAILED_INITIALIZATION,
                          "Too many evaluation failures during equilibiration-based scaling.");
        }
      }
      // Get the numbers out of the matrices and vectors, and add them
      // to avrg_values
      TripletHelper::FillValues(nnz_jac_c, *jac_c, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nnz_jac_c; i++) {
          avrg_values[i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nnz_jac_c; i++) {
          avrg_values[i] += fabs(val_buffer[i]);
        }
      }
      TripletHelper::FillValues(nnz_jac_d, *jac_d, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nnz_jac_d; i++) {
          avrg_values[nnz_jac_c+i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nnz_jac_d; i++) {
          avrg_values[nnz_jac_c+i] += fabs(val_buffer[i]);
        }
      }
      TripletHelper::FillValuesFromVector(nx, *grad_f, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nx; i++) {
          avrg_values[nnz_jac_c+nnz_jac_d+i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nx; i++) {
          avrg_values[nnz_jac_c+nnz_jac_d+i] += fabs(val_buffer[i]);
        }
      }
    }
    delete [] val_buffer;
    for (Index i=0; i<nnz_jac_c+nnz_jac_d+nx; i++) {
      avrg_values[i] /= (Number)num_evals;
    }

    // Get the sparsity structure
    ipfint* AIRN = new ipfint[nnz_jac_c+nnz_jac_d+nx];
    ipfint* AJCN = new ipfint[nnz_jac_c+nnz_jac_d+nx];
    if (sizeof(ipfint)==sizeof(Index)) {
      TripletHelper::FillRowCol(nnz_jac_c, *jac_c, &AIRN[0], &AJCN[0]);
      TripletHelper::FillRowCol(nnz_jac_d, *jac_d, &AIRN[nnz_jac_c], &AJCN[nnz_jac_c], nc);
    }
    else {
      THROW_EXCEPTION(INTERNAL_ABORT,
                      "Need to implement missing code in EquilibriationScaling.");
    }
    // sort out the zero entries in objective function gradient
    Index nnz_grad_f = 0;
    const Index idx = nnz_jac_c+nnz_jac_d;
    for (Index i=0; i<nx; i++) {
      if (avrg_values[idx+i] != 0.) {
        AIRN[idx+nnz_grad_f] = nc+nd+1;
        AJCN[idx+nnz_grad_f] = i+1;
        avrg_values[idx+nnz_grad_f] = avrg_values[idx+i];
        nnz_grad_f++;
      }
    }

    // Now call MC19 to compute the scaling factors
    const ipfint N = Max(nc+nd+1,nx);
    float* R = new float[N];
    float* C = new float[N];
    float* W = new float[5*N];
#if defined(COINHSL_HAS_MC19) || defined(HAVE_LINEARSOLVERLOADER)
    const ipfint NZ = nnz_jac_c+nnz_jac_d+nnz_grad_f;
    //F77_FUNC(mc19ad,MC19AD)(&N, &NZ, avrg_values, AIRN, AJCN, R, C, W);
    F77_FUNC(mc19ad,MC19AD)(&N, &NZ, avrg_values, AJCN, AIRN, C, R, W);
#else

    THROW_EXCEPTION(OPTION_INVALID,
                    "Currently cannot do equilibration-based NLP scaling if MC19 is not available.");
#endif

    delete[] W;

    delete [] avrg_values;
    delete [] AIRN;
    delete [] AJCN;

    // Correct the scaling values
    Number* row_scale = new Number[nc+nd+1];
    Number* col_scale = new Number[nx];
    for (Index i=0; i<nc+nd+1; i++) {
      row_scale[i] = exp((Number)R[i]);
    }
    for (Index i=0; i<nx; i++) {
      col_scale[i] = exp((Number)C[i]);
    }
    delete [] R;
    delete [] C;

    // get the scaling factors
    df = row_scale[nc+nd];
    dc = c_space->MakeNew();
    TripletHelper::PutValuesInVector(nc, &row_scale[0], *dc);
    dd = d_space->MakeNew();
    TripletHelper::PutValuesInVector(nd, &row_scale[nc], *dd);
    dx = x_space->MakeNew();
    TripletHelper::PutValuesInVector(nx, col_scale, *dx);

    delete [] row_scale;
    delete [] col_scale;
  }
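
MC19 returns logarithmic scaling factors R (rows) and C (columns), which the code above exponentiates and splits into c-, d- and objective scalings. Assuming the usual MC19 convention that the scaled entry is exp(R_i) * a_ij * exp(C_j), here is a standalone sketch of applying such factors to a small dense matrix (the matrix and the R/C values are made up, not MC19 output):

#include <cmath>
#include <cstdio>

// Sketch of applying exponentiated equilibration factors: each entry a_ij is
// replaced by exp(R[i]) * a_ij * exp(C[j]).  The matrix and the R/C values
// below are made up (they are not actual MC19 output).
int main()
{
   const int m = 2, n = 3;
   const double A[m][n] = { { 1e4, 2.0,  0.0 },
                            { 0.0, 5e-3, 1.0 } };
   const double R[m] = { -4.0, 2.0 };        // row log-scalings
   const double C[n] = {  0.0, 1.0, -1.0 };  // column log-scalings

   for (int i = 0; i < m; i++)
   {
      for (int j = 0; j < n; j++)
         std::printf(" %10.3e", std::exp(R[i]) * A[i][j] * std::exp(C[j]));
      std::printf("\n");
   }
   return 0;
}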
Code Example #12
File: IpAlgBuilder.cpp  Project: MengbinZhu/pfldp
  SmartPtr<IpoptAlgorithm>
  AlgorithmBuilder::BuildBasicAlgorithm(const Journalist& jnlst,
                                        const OptionsList& options,
                                        const std::string& prefix)
  {
    DBG_START_FUN("AlgorithmBuilder::BuildBasicAlgorithm",
                  dbg_verbosity);

    bool mehrotra_algorithm;
    options.GetBoolValue("mehrotra_algorithm", mehrotra_algorithm, prefix);

    // Create the convergence check
    SmartPtr<ConvergenceCheck> convCheck =
      new OptimalityErrorConvergenceCheck();

    // Create the solvers that will be used by the main algorithm

    SmartPtr<SparseSymLinearSolverInterface> SolverInterface;
    std::string linear_solver;
    options.GetStringValue("linear_solver", linear_solver, prefix);
    bool use_custom_solver = false;
    if (linear_solver=="ma27") {
#ifndef COINHSL_HAS_MA27
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new Ma27TSolverInterface();
      if (!LSL_isMA27available()) {
        char buf[256];
        int rc = LSL_loadHSL(NULL, buf, 255);
        if (rc) {
          std::string errmsg;
          errmsg = "Selected linear solver MA27 not available.\nTried to obtain MA27 from shared library \"";
          errmsg += LSL_HSLLibraryName();
          errmsg += "\", but the following error occured:\n";
          errmsg += buf;
          THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
        }
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for MA27 has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new Ma27TSolverInterface();
#endif

    }
    else if (linear_solver=="ma57") {
#ifndef COINHSL_HAS_MA57
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new Ma57TSolverInterface();
      if (!LSL_isMA57available()) {
        char buf[256];
        int rc = LSL_loadHSL(NULL, buf, 255);
        if (rc) {
          std::string errmsg;
          errmsg = "Selected linear solver MA57 not available.\nTried to obtain MA57 from shared library \"";
          errmsg += LSL_HSLLibraryName();
          errmsg += "\", but the following error occured:\n";
          errmsg += buf;
          THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
        }
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for MA57 has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new Ma57TSolverInterface();
#endif

    }
    else if (linear_solver=="ma77") {
#ifndef COINHSL_HAS_MA77
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new Ma77SolverInterface();
      if (!LSL_isMA77available()) {
        char buf[256];
        int rc = LSL_loadHSL(NULL, buf, 255);
        if (rc) {
          std::string errmsg;
          errmsg = "Selected linear solver HSL_MA77 not available.\nTried to obtain HSL_MA77 from shared library \"";
          errmsg += LSL_HSLLibraryName();
          errmsg += "\", but the following error occured:\n";
          errmsg += buf;
          THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
        }
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for HSL_MA77 has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new Ma77SolverInterface();
#endif

    }
    else if (linear_solver=="ma86") {
#ifndef COINHSL_HAS_MA86
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new Ma86SolverInterface();
      if (!LSL_isMA86available()) {
        char buf[256];
        int rc = LSL_loadHSL(NULL, buf, 255);
        if (rc) {
          std::string errmsg;
          errmsg = "Selected linear solver HSL_MA86 not available.\nTried to obtain HSL_MA86 from shared library \"";
          errmsg += LSL_HSLLibraryName();
          errmsg += "\", but the following error occured:\n";
          errmsg += buf;
          THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
        }
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for HSL_MA86 has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new Ma86SolverInterface();
#endif

    }
    else if (linear_solver=="pardiso") {
#ifndef HAVE_PARDISO
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new PardisoSolverInterface();
      char buf[256];
      int rc = LSL_loadPardisoLib(NULL, buf, 255);
      if (rc) {
        std::string errmsg;
        errmsg = "Selected linear solver Pardiso not available.\nTried to obtain Pardiso from shared library \"";
        errmsg += LSL_PardisoLibraryName();
        errmsg += "\", but the following error occured:\n";
        errmsg += buf;
        THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for Pardiso has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new PardisoSolverInterface();
#endif

    }
    else if (linear_solver=="ma97") {
#ifndef COINHSL_HAS_MA97
# ifdef HAVE_LINEARSOLVERLOADER
      SolverInterface = new Ma97SolverInterface();
      if (!LSL_isMA97available()) {
        char buf[256];
        int rc = LSL_loadHSL(NULL, buf, 255);
        if (rc) {
          std::string errmsg;
          errmsg = "Selected linear solver HSL_MA97 not available.\nTried to obtain HSL_MA97 from shared library \"";
          errmsg += LSL_HSLLibraryName();
          errmsg += "\", but the following error occured:\n";
          errmsg += buf;
          THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
        }
      }
# else
      THROW_EXCEPTION(OPTION_INVALID, "Support for HSL_MA97 has not been compiled into Ipopt.");
# endif
#else
      SolverInterface = new Ma97SolverInterface();
#endif

    }
    else if (linear_solver=="wsmp") {
#ifdef HAVE_WSMP
      bool wsmp_iterative;
      options.GetBoolValue("wsmp_iterative", wsmp_iterative, prefix);
      if (wsmp_iterative) {
        SolverInterface = new IterativeWsmpSolverInterface();
      }
      else {
        SolverInterface = new WsmpSolverInterface();
      }
#else

      THROW_EXCEPTION(OPTION_INVALID,
                      "Selected linear solver WSMP not available.");
#endif

    }
    else if (linear_solver=="mumps") {
#ifdef COIN_HAS_MUMPS
      SolverInterface = new MumpsSolverInterface();
#else

      THROW_EXCEPTION(OPTION_INVALID,
                      "Selected linear solver MUMPS not available.");
#endif

    }
    else if (linear_solver=="custom") {
      ASSERT_EXCEPTION(IsValid(custom_solver_), OPTION_INVALID,
                       "Selected linear solver CUSTOM not available.");
      use_custom_solver = true;
    }

    SmartPtr<AugSystemSolver> AugSolver;
    if (use_custom_solver) {
      AugSolver = custom_solver_;
    }
    else {
      SmartPtr<TSymScalingMethod> ScalingMethod;
      std::string linear_system_scaling;
      if (!options.GetStringValue("linear_system_scaling",
                                  linear_system_scaling, prefix)) {
        // By default, don't use mc19 for non-HSL solvers, or HSL_MA97
        if (linear_solver!="ma27" && linear_solver!="ma57" && linear_solver!="ma77" && linear_solver!="ma86") {
          linear_system_scaling="none";
        }
      }
      if (linear_system_scaling=="mc19") {
#ifndef COINHSL_HAS_MC19
# ifdef HAVE_LINEARSOLVERLOADER
        ScalingMethod = new Mc19TSymScalingMethod();
        if (!LSL_isMC19available()) {
          char buf[256];
          int rc = LSL_loadHSL(NULL, buf, 255);
          if (rc) {
            std::string errmsg;
            errmsg = "Selected linear system scaling method MC19 not available.\n";
            errmsg += buf;
            THROW_EXCEPTION(OPTION_INVALID, errmsg.c_str());
          }
        }
# else
        THROW_EXCEPTION(OPTION_INVALID, "Support for MC19 has not been compiled into Ipopt.");
# endif
#else
        ScalingMethod = new Mc19TSymScalingMethod();
#endif

      }
      else if (linear_system_scaling=="slack-based") {
        ScalingMethod = new SlackBasedTSymScalingMethod();
      }

      SmartPtr<SymLinearSolver> ScaledSolver =
        new TSymLinearSolver(SolverInterface, ScalingMethod);

      AugSolver = new StdAugSystemSolver(*ScaledSolver);
    }

    Index enum_int;
    options.GetEnumValue("hessian_approximation", enum_int, prefix);
    HessianApproximationType hessian_approximation =
      HessianApproximationType(enum_int);
    if (hessian_approximation==LIMITED_MEMORY) {
      std::string lm_aug_solver;
      options.GetStringValue("limited_memory_aug_solver", lm_aug_solver,
                             prefix);
      if (lm_aug_solver == "sherman-morrison") {
        AugSolver = new LowRankAugSystemSolver(*AugSolver);
      }
      else if (lm_aug_solver == "extended") {
        Index lm_history;
        options.GetIntegerValue("limited_memory_max_history", lm_history,
                                prefix);
        Index max_rank;
        std::string lm_type;
        options.GetStringValue("limited_memory_update_type", lm_type, prefix);
        if (lm_type == "bfgs") {
          max_rank = 2*lm_history;
        }
        else if (lm_type == "sr1") {
          max_rank = lm_history;
        }
        else {
          THROW_EXCEPTION(OPTION_INVALID, "Unknown value for option \"limited_memory_update_type\".");
        }
        AugSolver = new LowRankSSAugSystemSolver(*AugSolver, max_rank);
      }
      else {
        THROW_EXCEPTION(OPTION_INVALID, "Unknown value for option \"limited_memory_aug_solver\".");
      }
    }

    SmartPtr<PDPerturbationHandler> pertHandler;
    std::string lsmethod;
    options.GetStringValue("line_search_method", lsmethod, prefix);
    if (lsmethod=="cg-penalty") {
      pertHandler = new CGPerturbationHandler();
    }
    else {
      pertHandler = new PDPerturbationHandler();
    }
    SmartPtr<PDSystemSolver> PDSolver =
      new PDFullSpaceSolver(*AugSolver, *pertHandler);

    // Create the object for initializing the iterates.  We include
    // both the warm start and the default initializer, so that the
    // warm start options can be activated without having to rebuild
    // the algorithm
    SmartPtr<EqMultiplierCalculator> EqMultCalculator =
      new LeastSquareMultipliers(*AugSolver);
    SmartPtr<IterateInitializer> WarmStartInitializer =
      new WarmStartIterateInitializer();
    SmartPtr<IterateInitializer> IterInitializer =
      new DefaultIterateInitializer(EqMultCalculator, WarmStartInitializer,
                                    AugSolver);

    SmartPtr<RestorationPhase> resto_phase;
    SmartPtr<RestoConvergenceCheck> resto_convCheck;

    // We only need a restoration phase object if we use the filter
    // line search
    if (lsmethod=="filter" || lsmethod=="penalty") {
      // Solver for the restoration phase
      SmartPtr<AugSystemSolver> resto_AugSolver =
        new AugRestoSystemSolver(*AugSolver);
      SmartPtr<PDPerturbationHandler> resto_pertHandler =
        new PDPerturbationHandler();
      SmartPtr<PDSystemSolver> resto_PDSolver =
        new PDFullSpaceSolver(*resto_AugSolver, *resto_pertHandler);

      // Convergence check in the restoration phase
      if (lsmethod=="filter") {
        resto_convCheck = new RestoFilterConvergenceCheck();
      }
      else if (lsmethod=="penalty") {
        resto_convCheck = new RestoPenaltyConvergenceCheck();
      }

      // Line search method for the restoration phase
      SmartPtr<RestoRestorationPhase> resto_resto =
        new RestoRestorationPhase();

      SmartPtr<BacktrackingLSAcceptor> resto_LSacceptor;
      std::string resto_lsacceptor;
      options.GetStringValue("line_search_method", resto_lsacceptor,
                             "resto."+prefix);
      if (resto_lsacceptor=="filter") {
        resto_LSacceptor = new FilterLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      else if (resto_lsacceptor=="cg-penalty") {
        resto_LSacceptor = new CGPenaltyLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      else if (resto_lsacceptor=="penalty") {
        resto_LSacceptor = new PenaltyLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      SmartPtr<LineSearch> resto_LineSearch =
        new BacktrackingLineSearch(resto_LSacceptor, GetRawPtr(resto_resto),
                                   GetRawPtr(resto_convCheck));

      // Create the mu update that will be used by the restoration phase
      // algorithm
      SmartPtr<MuUpdate> resto_MuUpdate;
      std::string resto_smuupdate;
      if (!options.GetStringValue("mu_strategy", resto_smuupdate, "resto."+prefix)) {
        // Change default for quasi-Newton option (then we use adaptive)
        Index enum_int;
        if (options.GetEnumValue("hessian_approximation", enum_int, prefix)) {
          HessianApproximationType hessian_approximation =
            HessianApproximationType(enum_int);
          if (hessian_approximation==LIMITED_MEMORY) {
            resto_smuupdate = "adaptive";
          }
        }
      }

      std::string resto_smuoracle;
      std::string resto_sfixmuoracle;
      if (resto_smuupdate=="adaptive" ) {
        options.GetStringValue("mu_oracle", resto_smuoracle, "resto."+prefix);
        options.GetStringValue("fixed_mu_oracle", resto_sfixmuoracle, "resto."+prefix);
      }

      if (resto_smuupdate=="monotone" ) {
        resto_MuUpdate = new MonotoneMuUpdate(GetRawPtr(resto_LineSearch));
      }
      else if (resto_smuupdate=="adaptive") {
        SmartPtr<MuOracle> resto_MuOracle;
        if (resto_smuoracle=="loqo") {
          resto_MuOracle = new LoqoMuOracle();
        }
        else if (resto_smuoracle=="probing") {
          resto_MuOracle = new ProbingMuOracle(resto_PDSolver);
        }
        else if (resto_smuoracle=="quality-function") {
          resto_MuOracle = new QualityFunctionMuOracle(resto_PDSolver);
        }
        SmartPtr<MuOracle> resto_FixMuOracle;
        if (resto_sfixmuoracle=="loqo") {
          resto_FixMuOracle = new LoqoMuOracle();
        }
        else if (resto_sfixmuoracle=="probing") {
          resto_FixMuOracle = new ProbingMuOracle(resto_PDSolver);
        }
        else if (resto_sfixmuoracle=="quality-function") {
          resto_FixMuOracle = new QualityFunctionMuOracle(resto_PDSolver);
        }
        else {
          resto_FixMuOracle = NULL;
        }
        resto_MuUpdate =
          new AdaptiveMuUpdate(GetRawPtr(resto_LineSearch),
                               resto_MuOracle, resto_FixMuOracle);
      }

      // Initialization of the iterates for the restoration phase
      SmartPtr<EqMultiplierCalculator> resto_EqMultCalculator =
        new LeastSquareMultipliers(*resto_AugSolver);
      SmartPtr<IterateInitializer> resto_IterInitializer =
        new RestoIterateInitializer(resto_EqMultCalculator);

      // Create the object for the iteration output during restoration
      SmartPtr<OrigIterationOutput> resto_OrigIterOutput = NULL;
      //   new OrigIterationOutput();
      SmartPtr<IterationOutput> resto_IterOutput =
        new RestoIterationOutput(resto_OrigIterOutput);

      // Get the Hessian updater for the restoration phase
      SmartPtr<HessianUpdater> resto_HessUpdater;
      switch (hessian_approximation) {
      case EXACT:
        resto_HessUpdater = new ExactHessianUpdater();
        break;
      case LIMITED_MEMORY:
        // ToDo This needs to be replaced!
        resto_HessUpdater  = new LimMemQuasiNewtonUpdater(true);
        break;
      }

      // Put together the overall restoration phase IP algorithm
      SmartPtr<SearchDirectionCalculator> resto_SearchDirCalc;
      if (resto_lsacceptor=="cg-penalty") {
        resto_SearchDirCalc = new CGSearchDirCalculator(GetRawPtr(resto_PDSolver));
      }
      else {
        resto_SearchDirCalc = new PDSearchDirCalculator(GetRawPtr(resto_PDSolver));
      }

      SmartPtr<IpoptAlgorithm> resto_alg =
        new IpoptAlgorithm(resto_SearchDirCalc,
                           GetRawPtr(resto_LineSearch),
                           GetRawPtr(resto_MuUpdate),
                           GetRawPtr(resto_convCheck),
                           resto_IterInitializer,
                           resto_IterOutput,
                           resto_HessUpdater,
                           resto_EqMultCalculator);

      // Set the restoration phase
      resto_phase =
        new MinC_1NrmRestorationPhase(*resto_alg, EqMultCalculator);
    }

    // Create the line search to be used by the main algorithm
    SmartPtr<BacktrackingLSAcceptor> LSacceptor;
    if (lsmethod=="filter") {
      LSacceptor = new FilterLSAcceptor(GetRawPtr(PDSolver));
    }
    else if (lsmethod=="cg-penalty") {
      LSacceptor = new CGPenaltyLSAcceptor(GetRawPtr(PDSolver));
    }
    else if (lsmethod=="penalty") {
      LSacceptor = new PenaltyLSAcceptor(GetRawPtr(PDSolver));
    }
    SmartPtr<LineSearch> lineSearch =
      new BacktrackingLineSearch(LSacceptor,
                                 GetRawPtr(resto_phase), convCheck);

    // The following cross reference is not good: We have to store a
    // pointer to the lineSearch object in resto_convCheck as a
    // non-SmartPtr to make sure that things are properly deleted when
    // the IpoptAlgorithm returned by the Builder is destructed.
    if (IsValid(resto_convCheck)) {
      resto_convCheck->SetOrigLSAcceptor(*LSacceptor);
    }

    // Create the mu update that will be used by the main algorithm
    SmartPtr<MuUpdate> MuUpdate;
    std::string smuupdate;
    if (!options.GetStringValue("mu_strategy", smuupdate, prefix)) {
      // Change default for quasi-Newton option (then we use adaptive)
      Index enum_int;
      if (options.GetEnumValue("hessian_approximation", enum_int, prefix)) {
        HessianApproximationType hessian_approximation =
          HessianApproximationType(enum_int);
        if (hessian_approximation==LIMITED_MEMORY) {
          smuupdate = "adaptive";
        }
      }
      if (mehrotra_algorithm)
        smuupdate = "adaptive";
    }
    ASSERT_EXCEPTION(!mehrotra_algorithm || smuupdate=="adaptive",
                     OPTION_INVALID,
                     "If mehrotra_algorithm=yes, mu_strategy must be \"adaptive\".");
    std::string smuoracle;
    std::string sfixmuoracle;
    if (smuupdate=="adaptive" ) {
      if (!options.GetStringValue("mu_oracle", smuoracle, prefix)) {
        if (mehrotra_algorithm)
          smuoracle = "probing";
      }
      options.GetStringValue("fixed_mu_oracle", sfixmuoracle, prefix);
      ASSERT_EXCEPTION(!mehrotra_algorithm || smuoracle=="probing",
                       OPTION_INVALID,
                       "If mehrotra_algorithm=yes, mu_oracle must be \"probing\".");
    }

    if (smuupdate=="monotone" ) {
      MuUpdate = new MonotoneMuUpdate(GetRawPtr(lineSearch));
    }
    else if (smuupdate=="adaptive") {
      SmartPtr<MuOracle> muOracle;
      if (smuoracle=="loqo") {
        muOracle = new LoqoMuOracle();
      }
      else if (smuoracle=="probing") {
        muOracle = new ProbingMuOracle(PDSolver);
      }
      else if (smuoracle=="quality-function") {
        muOracle = new QualityFunctionMuOracle(PDSolver);
      }
      SmartPtr<MuOracle> FixMuOracle;
      if (sfixmuoracle=="loqo") {
        FixMuOracle = new LoqoMuOracle();
      }
      else if (sfixmuoracle=="probing") {
        FixMuOracle = new ProbingMuOracle(PDSolver);
      }
      else if (sfixmuoracle=="quality-function") {
        FixMuOracle = new QualityFunctionMuOracle(PDSolver);
      }
      else {
        FixMuOracle = NULL;
      }
      MuUpdate = new AdaptiveMuUpdate(GetRawPtr(lineSearch),
                                      muOracle, FixMuOracle);
    }

    // Create the object for the iteration output
    SmartPtr<IterationOutput> IterOutput =
      new OrigIterationOutput();

    // Get the Hessian updater for the main algorithm
    SmartPtr<HessianUpdater> HessUpdater;
    switch (hessian_approximation) {
    case EXACT:
      HessUpdater = new ExactHessianUpdater();
      break;
    case LIMITED_MEMORY:
      // ToDo This needs to be replaced!
      HessUpdater  = new LimMemQuasiNewtonUpdater(false);
      break;
    }

    // Create the main algorithm
    SmartPtr<SearchDirectionCalculator> SearchDirCalc;
    if (lsmethod=="cg-penalty") {
      SearchDirCalc = new CGSearchDirCalculator(GetRawPtr(PDSolver));
    }
    else {
      SearchDirCalc = new PDSearchDirCalculator(GetRawPtr(PDSolver));
    }
    SmartPtr<IpoptAlgorithm> alg =
      new IpoptAlgorithm(SearchDirCalc,
                         GetRawPtr(lineSearch), MuUpdate,
                         convCheck, IterInitializer, IterOutput,
                         HessUpdater, EqMultCalculator);

    return alg;
  }
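For orientation, a minimal sketch of how this builder method might be driven; the journalist and options setup below is an assumption for illustration (in a normal run IpoptApplication creates and registers these objects), not something taken from the example above.

    // Assumed setup: journalist, options list and builder created by hand.
    SmartPtr<Journalist> jnlst = new Journalist();
    SmartPtr<OptionsList> options = new OptionsList();
    options->SetStringValue("linear_solver", "mumps");   // any solver compiled in
    options->SetStringValue("mu_strategy", "adaptive");

    AlgorithmBuilder builder;
    SmartPtr<IpoptAlgorithm> alg =
      builder.BuildBasicAlgorithm(*jnlst, *options, "");  // "" = no option prefix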
Code example #13
0
  /** Output LaTeX table of options. */
  void 
  RegisteredOptions::writeLatexOptionsTable(std::ostream &of, ExtraCategoriesInfo which){
  std::map<std::string, Ipopt::SmartPtr<Ipopt::RegisteredOption> > 
           registered_options = RegisteredOptionsList();

  //Print table header
  //of<<"\\begin{threeparttable}"<<std::endl
  of<<"\\topcaption{\\label{tab:options} "<<std::endl
    <<"List of options and compatibility with the different algorithms."<<std::endl
    <<"}"<<std::endl;
  of<<"\\tablehead{\\hline "<<std::endl
    <<"Option & type & ";
  //of<<" default & ";
  of<<"{\\tt B-BB} & {\\tt B-OA} & {\\tt B-QG} & {\\tt B-Hyb} & {\\tt B-Ecp} & {\\tt B-iFP} & {\\tt Cbc\\_Par} \\\\"<<std::endl
    <<"\\hline"<<std::endl
    <<"\\hline}"<<std::endl;
  of<<"\\tabletail{\\hline \\multicolumn{9}{|c|}{continued on next page}\\\\"
    <<"\\hline}"<<std::endl; 
  of<<"\\tablelasttail{\\hline}"<<std::endl;
  of<<"{\\footnotesize"<<std::endl;
  of<<"\\begin{xtabular}{@{}|@{\\;}l@{\\;}|@{\\;}r@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{\\;}c@{\\;}|@{}}"<<std::endl;

  //sort options by categories and alphabetical order
  std::list< Ipopt::RegisteredOption * > sortedOptions;
 
  for(std::map<std::string, Ipopt::SmartPtr<Ipopt::RegisteredOption > >::iterator i =
          registered_options.begin(); i != registered_options.end() ; i++){
    if(categoriesInfo(i->second->RegisteringCategory()) == which)
      sortedOptions.push_back(GetRawPtr(i->second));
  }

   sortedOptions.sort(optionsCmp());
   std::string registeringCategory = "";
   for(std::list< Ipopt::RegisteredOption * >::iterator i = sortedOptions.begin();
       i != sortedOptions.end() ; i++)
   {
     if((*i)->RegisteringCategory() != registeringCategory){
       registeringCategory = (*i)->RegisteringCategory();
       of<<"\\hline"<<std::endl
         <<"\\multicolumn{1}{|c}{} & \\multicolumn{8}{l|}{"
         <<registeringCategory<<"}\\\\"<<std::endl
         <<"\\hline"<<std::endl;
     }
     
     of<<makeLatex((*i)->Name())<<"& "<<OptionType2Char((*i)->Type())//<<"& "
       //<<makeLatex(defaultAsString(*i))
       <<"& "<<( (isValidForBBB((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForBOA((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForBQG((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForHybrid((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForBEcp((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForBiFP((*i)->Name()))? "$\\surd$" : "-" )
       <<"& "<<( (isValidForCbc((*i)->Name()))? "$\\surd$" : "-" )
       <<"\\\\"<<std::endl;
   }
   //Print table end
  of<<"\\hline"<<std::endl
    <<"\\end{xtabular}"<<std::endl;
  of<<"}"<<std::endl;
#if 0
  of<<"\\begin{tablenotes}"<<std::endl
    <<"\\item $\\strut^*$ option is available"<<std::endl
    <<"        for MILP subsolver (it is only passed if the {\\tt milp\\_subsolver} optio"<<std::endl
    <<"        see Subsection \\ref{sec:milp_opt})."<<std::endl
    <<"       \\item $\\strut^1$ disabled for stability reasons."<<std::endl
    <<"\\end{tablenotes}"<<std::endl
    <<"\\end{threeparttable} "<<std::endl;
#endif
  }
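For reference, the LaTeX emitted by this routine has roughly the following shape (one data row per registered option of the requested category; $\surd$ marks the algorithms for which the option is valid, and the column set is exactly the one in the header strings above):

    \topcaption{\label{tab:options} List of options and compatibility with the different algorithms.}
    \tablehead{\hline Option & type & {\tt B-BB} & {\tt B-OA} & ... & {\tt Cbc\_Par} \\ \hline\hline}
    {\footnotesize
    \begin{xtabular}{@{}|@{\;}l@{\;}|@{\;}r@{\;}|...|@{}}
    \hline
    \multicolumn{1}{|c}{} & \multicolumn{8}{l|}{<registering category>}\\
    \hline
    <option name> & <type> & $\surd$ & - & $\surd$ & ... \\
    \hline
    \end{xtabular}
    }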
Code example #14
0
 inline Vector* Vec(Index i)
 {
   DBG_ASSERT(i < NCols());
   DBG_ASSERT(IsValid(non_const_vecs_[i]));
   return GetRawPtr(non_const_vecs_[i]);
 }
Code example #15
0
File: IpRestoIpoptNLP.hpp Project: imzye/coin
 virtual SmartPtr<const VectorSpace> x_space() const
 {
     return GetRawPtr(x_space_);
 }
Code example #16
0
SmartPtr<LineSearch> AlgorithmBuilder::BuildLineSearch(
   const Journalist&     jnlst,
   const OptionsList&    options,
   const std::string&    prefix
   )
{
   DBG_ASSERT(IsValid(ConvCheck_));
   DBG_ASSERT(IsValid(EqMultCalculator_));

   Index enum_int;
   options.GetEnumValue("hessian_approximation", enum_int, prefix);
   HessianApproximationType hessian_approximation = HessianApproximationType(enum_int);

   SmartPtr<RestorationPhase> resto_phase;
   SmartPtr<RestoConvergenceCheck> resto_convCheck;

   // We only need a restoration phase object if we use the filter
   // line search
   std::string lsmethod;
   options.GetStringValue("line_search_method", lsmethod, prefix);
   if( lsmethod == "filter" || lsmethod == "penalty" )
   {
      // Solver for the restoration phase
      SmartPtr<AugSystemSolver> resto_AugSolver = new AugRestoSystemSolver(*GetAugSystemSolver(jnlst, options, prefix));
      SmartPtr<PDPerturbationHandler> resto_pertHandler = new PDPerturbationHandler();
      SmartPtr<PDSystemSolver> resto_PDSolver = new PDFullSpaceSolver(*resto_AugSolver, *resto_pertHandler);

      // Convergence check in the restoration phase
      if( lsmethod == "filter" )
      {
         resto_convCheck = new RestoFilterConvergenceCheck();
      }
      else if( lsmethod == "penalty" )
      {
         resto_convCheck = new RestoPenaltyConvergenceCheck();
      }

      // Line search method for the restoration phase
      SmartPtr<RestoRestorationPhase> resto_resto = new RestoRestorationPhase();

      SmartPtr<BacktrackingLSAcceptor> resto_LSacceptor;
      std::string resto_lsacceptor;
      options.GetStringValue("line_search_method", resto_lsacceptor, "resto." + prefix);
      if( resto_lsacceptor == "filter" )
      {
         resto_LSacceptor = new FilterLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      else if( resto_lsacceptor == "cg-penalty" )
      {
         resto_LSacceptor = new CGPenaltyLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      else if( resto_lsacceptor == "penalty" )
      {
         resto_LSacceptor = new PenaltyLSAcceptor(GetRawPtr(resto_PDSolver));
      }
      SmartPtr<LineSearch> resto_LineSearch = new BacktrackingLineSearch(resto_LSacceptor, GetRawPtr(resto_resto),
         GetRawPtr(resto_convCheck));

      // Create the mu update that will be used by the restoration phase
      // algorithm
      SmartPtr<MuUpdate> resto_MuUpdate;
      std::string resto_smuupdate;
      if( !options.GetStringValue("mu_strategy", resto_smuupdate, "resto." + prefix) )
      {
         // Change default for quasi-Newton option (then we use adaptive)
         if( hessian_approximation == LIMITED_MEMORY )
         {
            resto_smuupdate = "adaptive";
         }
      }

      std::string resto_smuoracle;
      std::string resto_sfixmuoracle;
      if( resto_smuupdate == "adaptive" )
      {
         options.GetStringValue("mu_oracle", resto_smuoracle, "resto." + prefix);
         options.GetStringValue("fixed_mu_oracle", resto_sfixmuoracle, "resto." + prefix);
      }

      if( resto_smuupdate == "monotone" )
      {
         resto_MuUpdate = new MonotoneMuUpdate(GetRawPtr(resto_LineSearch));
      }
      else if( resto_smuupdate == "adaptive" )
      {
         SmartPtr<MuOracle> resto_MuOracle;
         if( resto_smuoracle == "loqo" )
         {
            resto_MuOracle = new LoqoMuOracle();
         }
         else if( resto_smuoracle == "probing" )
         {
            resto_MuOracle = new ProbingMuOracle(resto_PDSolver);
         }
         else if( resto_smuoracle == "quality-function" )
         {
            resto_MuOracle = new QualityFunctionMuOracle(resto_PDSolver);
         }
         SmartPtr<MuOracle> resto_FixMuOracle;
         if( resto_sfixmuoracle == "loqo" )
         {
            resto_FixMuOracle = new LoqoMuOracle();
         }
         else if( resto_sfixmuoracle == "probing" )
         {
            resto_FixMuOracle = new ProbingMuOracle(resto_PDSolver);
         }
         else if( resto_sfixmuoracle == "quality-function" )
         {
            resto_FixMuOracle = new QualityFunctionMuOracle(resto_PDSolver);
         }
         else
         {
            resto_FixMuOracle = NULL;
         }
         resto_MuUpdate = new AdaptiveMuUpdate(GetRawPtr(resto_LineSearch), resto_MuOracle, resto_FixMuOracle);
      }

      // Initialization of the iterates for the restoration phase
      SmartPtr<EqMultiplierCalculator> resto_EqMultCalculator = new LeastSquareMultipliers(*resto_AugSolver);
      SmartPtr<IterateInitializer> resto_IterInitializer = new RestoIterateInitializer(resto_EqMultCalculator);

      // Create the object for the iteration output during restoration
      SmartPtr<OrigIterationOutput> resto_OrigIterOutput = NULL;
      //   new OrigIterationOutput();
      SmartPtr<IterationOutput> resto_IterOutput = new RestoIterationOutput(resto_OrigIterOutput);

      // Get the Hessian updater for the restoration phase
      SmartPtr<HessianUpdater> resto_HessUpdater;
      switch( hessian_approximation )
      {
         case EXACT:
            resto_HessUpdater = new ExactHessianUpdater();
            break;
         case LIMITED_MEMORY:
            // ToDo This needs to be replaced!
            resto_HessUpdater = new LimMemQuasiNewtonUpdater(true);
            break;
      }

      // Put together the overall restoration phase IP algorithm
      SmartPtr<SearchDirectionCalculator> resto_SearchDirCalc;
      if( resto_lsacceptor == "cg-penalty" )
      {
         resto_SearchDirCalc = new CGSearchDirCalculator(GetRawPtr(resto_PDSolver));
      }
      else
      {
         resto_SearchDirCalc = new PDSearchDirCalculator(GetRawPtr(resto_PDSolver));
      }

      SmartPtr<IpoptAlgorithm> resto_alg = new IpoptAlgorithm(resto_SearchDirCalc, GetRawPtr(resto_LineSearch),
         GetRawPtr(resto_MuUpdate), GetRawPtr(resto_convCheck), resto_IterInitializer, resto_IterOutput,
         resto_HessUpdater, resto_EqMultCalculator);

      // Set the restoration phase
      resto_phase = new MinC_1NrmRestorationPhase(*resto_alg, EqMultCalculator_);
   }

   // Create the line search to be used by the main algorithm
   SmartPtr<BacktrackingLSAcceptor> LSacceptor;
   if( lsmethod == "filter" )
   {
      LSacceptor = new FilterLSAcceptor(GetRawPtr(GetPDSystemSolver(jnlst, options, prefix)));
   }
   else if( lsmethod == "cg-penalty" )
   {
      LSacceptor = new CGPenaltyLSAcceptor(GetRawPtr(GetPDSystemSolver(jnlst, options, prefix)));
   }
   else if( lsmethod == "penalty" )
   {
      LSacceptor = new PenaltyLSAcceptor(GetRawPtr(GetPDSystemSolver(jnlst, options, prefix)));
   }
   SmartPtr<LineSearch> LineSearch = new BacktrackingLineSearch(LSacceptor, GetRawPtr(resto_phase), ConvCheck_);

   // The following cross reference is not good: We have to store a
   // pointer to the LineSearch_ object in resto_convCheck as a
   // non-SmartPtr to make sure that things are properly deleted when
   // the IpoptAlgorithm returned by the Builder is destructed.
   if( IsValid(resto_convCheck) )
   {
      resto_convCheck->SetOrigLSAcceptor(*LSacceptor);
   }

   return LineSearch;
}
Code example #17
0
File: IpNLPScaling.cpp Project: AyMaN-GhOsT/simbody
  void StandardScalingBase::DetermineScaling(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    SmartPtr<const MatrixSpace>& new_jac_c_space,
    SmartPtr<const MatrixSpace>& new_jac_d_space,
    SmartPtr<const SymMatrixSpace>& new_h_space)
  {
    SmartPtr<Vector> dc;
    SmartPtr<Vector> dd;
    DetermineScalingParametersImpl(x_space, c_space, d_space,
                                   jac_c_space, jac_d_space,
                                   h_space, df_, dx_, dc, dd);

    df_ *= obj_scaling_factor_;

    if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) {
      Jnlst().Printf(J_VECTOR, J_MAIN, "objective scaling factor = %g\n", df_);
      if (IsValid(dx_)) {
        dx_->Print(Jnlst(), J_VECTOR, J_MAIN, "x scaling vector");
      }
      else {
        Jnlst().Printf(J_VECTOR, J_MAIN, "No x scaling provided\n");
      }
      if (IsValid(dc)) {
        dc->Print(Jnlst(), J_VECTOR, J_MAIN, "c scaling vector");
      }
      else {
        Jnlst().Printf(J_VECTOR, J_MAIN, "No c scaling provided\n");
      }
      if (IsValid(dd)) {
        dd->Print(Jnlst(), J_VECTOR, J_MAIN, "d scaling vector");
      }
      else {
        Jnlst().Printf(J_VECTOR, J_MAIN, "No d scaling provided\n");
      }
    }

    // create the scaling matrix spaces
    if (IsValid(dx_) || IsValid(dc)) {
      scaled_jac_c_space_ =
        new ScaledMatrixSpace(ConstPtr(dc), false, jac_c_space,
                              ConstPtr(dx_), true);
      new_jac_c_space = GetRawPtr(scaled_jac_c_space_);
    }
    else {
      scaled_jac_c_space_ = NULL;
      new_jac_c_space = jac_c_space;
    }

    if (IsValid(dx_) || IsValid(dd)) {
      scaled_jac_d_space_ =
        new ScaledMatrixSpace(ConstPtr(dd), false, jac_d_space,
                              ConstPtr(dx_), true);
      new_jac_d_space = GetRawPtr(scaled_jac_d_space_);
    }
    else {
      scaled_jac_d_space_ = NULL;
      new_jac_d_space = jac_d_space;
    }

    if (IsValid(h_space)) {
      if (IsValid(dx_)) {
        scaled_h_space_ = new SymScaledMatrixSpace(ConstPtr(dx_), true, h_space);
        new_h_space = GetRawPtr(scaled_h_space_);
      }
      else {
        scaled_h_space_ = NULL;
        new_h_space = h_space;
      }
    }
    else {
      new_h_space = NULL;
    }
  }
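In terms of the scaling vectors, the wrapped matrix spaces created here represent (a sketch, assuming the boolean flags in the ScaledMatrixSpace constructors mark the x scaling as applied reciprocally to the columns):

    J_c_scaled = diag(dc) * J_c * diag(dx)^(-1)
    J_d_scaled = diag(dd) * J_d * diag(dx)^(-1)
    H_scaled   = diag(dx)^(-1) * H * diag(dx)^(-1)

so the equality and inequality constraints are scaled by dc and dd, the variables by dx_, and the objective by the scalar df_.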
Code example #18
0
SmartPtr<MuUpdate> AlgorithmBuilder::BuildMuUpdate(
   const Journalist&     jnlst,
   const OptionsList&    options,
   const std::string&    prefix
   )
{
   DBG_ASSERT(IsValid(LineSearch_));

   bool mehrotra_algorithm;
   options.GetBoolValue("mehrotra_algorithm", mehrotra_algorithm, prefix);

   // Create the mu update that will be used by the main algorithm
   SmartPtr<MuUpdate> MuUpdate;
   std::string smuupdate;
   if( !options.GetStringValue("mu_strategy", smuupdate, prefix) )
   {
      // Change default for quasi-Newton option (then we use adaptive)
      Index enum_int;
      if( options.GetEnumValue("hessian_approximation", enum_int, prefix) )
      {
         HessianApproximationType hessian_approximation = HessianApproximationType(enum_int);
         if( hessian_approximation == LIMITED_MEMORY )
         {
            smuupdate = "adaptive";
         }
      }
      if( mehrotra_algorithm )
      {
         smuupdate = "adaptive";
      }
   }
   ASSERT_EXCEPTION(!mehrotra_algorithm || smuupdate == "adaptive", OPTION_INVALID,
      "If mehrotra_algorithm=yes, mu_strategy must be \"adaptive\".");
   std::string smuoracle;
   std::string sfixmuoracle;
   if( smuupdate == "adaptive" )
   {
      if( !options.GetStringValue("mu_oracle", smuoracle, prefix) )
      {
         if( mehrotra_algorithm )
         {
            smuoracle = "probing";
         }
      }
      options.GetStringValue("fixed_mu_oracle", sfixmuoracle, prefix);
      ASSERT_EXCEPTION(!mehrotra_algorithm || smuoracle == "probing", OPTION_INVALID,
         "If mehrotra_algorithm=yes, mu_oracle must be \"probing\".");
   }

   if( smuupdate == "monotone" )
   {
      MuUpdate = new MonotoneMuUpdate(GetRawPtr(LineSearch_));
   }
   else if( smuupdate == "adaptive" )
   {
      SmartPtr<MuOracle> muOracle;
      if( smuoracle == "loqo" )
      {
         muOracle = new LoqoMuOracle();
      }
      else if( smuoracle == "probing" )
      {
         muOracle = new ProbingMuOracle(GetPDSystemSolver(jnlst, options, prefix));
      }
      else if( smuoracle == "quality-function" )
      {
         muOracle = new QualityFunctionMuOracle(GetPDSystemSolver(jnlst, options, prefix));
      }
      SmartPtr<MuOracle> FixMuOracle;
      if( sfixmuoracle == "loqo" )
      {
         FixMuOracle = new LoqoMuOracle();
      }
      else if( sfixmuoracle == "probing" )
      {
         FixMuOracle = new ProbingMuOracle(GetPDSystemSolver(jnlst, options, prefix));
      }
      else if( sfixmuoracle == "quality-function" )
      {
         FixMuOracle = new QualityFunctionMuOracle(GetPDSystemSolver(jnlst, options, prefix));
      }
      else
      {
         FixMuOracle = NULL;
      }
      MuUpdate = new AdaptiveMuUpdate(GetRawPtr(LineSearch_), muOracle, FixMuOracle);
   }
   return MuUpdate;
}
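The option interplay above can be summarized with a small sketch (assumed option values, set through an existing OptionsList; for illustration only):

    // With the Mehrotra heuristic enabled, the builder requires the adaptive
    // mu strategy and the probing oracle, and selects them itself if the user
    // has not set mu_strategy / mu_oracle explicitly.
    options->SetStringValue("mehrotra_algorithm", "yes");
    options->SetStringValue("mu_strategy", "adaptive");   // or leave unset
    // Setting mu_strategy to "monotone" together with mehrotra_algorithm=yes
    // would trigger the OPTION_INVALID exception asserted above.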
Code example #19
0
void RestoIpoptNLP::GetSpaces(
   SmartPtr<const VectorSpace>&    x_space,
   SmartPtr<const VectorSpace>&    c_space,
   SmartPtr<const VectorSpace>&    d_space,
   SmartPtr<const VectorSpace>&    x_l_space,
   SmartPtr<const MatrixSpace>&    px_l_space,
   SmartPtr<const VectorSpace>&    x_u_space,
   SmartPtr<const MatrixSpace>&    px_u_space,
   SmartPtr<const VectorSpace>&    d_l_space,
   SmartPtr<const MatrixSpace>&    pd_l_space,
   SmartPtr<const VectorSpace>&    d_u_space,
   SmartPtr<const MatrixSpace>&    pd_u_space,
   SmartPtr<const MatrixSpace>&    Jac_c_space,
   SmartPtr<const MatrixSpace>&    Jac_d_space,
   SmartPtr<const SymMatrixSpace>& Hess_lagrangian_space
   )
{
   x_space = GetRawPtr(x_space_);
   c_space = GetRawPtr(c_space_);
   d_space = GetRawPtr(d_space_);
   x_l_space = GetRawPtr(x_l_space_);
   px_l_space = GetRawPtr(px_l_space_);
   x_u_space = GetRawPtr(x_u_space_);
   px_u_space = GetRawPtr(px_u_space_);
   d_l_space = GetRawPtr(d_l_space_);
   pd_l_space = GetRawPtr(pd_l_space_);
   d_u_space = GetRawPtr(d_u_space_);
   pd_u_space = GetRawPtr(pd_u_space_);
   Jac_c_space = GetRawPtr(jac_c_space_);
   Jac_d_space = GetRawPtr(jac_d_space_);
   Hess_lagrangian_space = GetRawPtr(h_space_);
}
Code example #20
0
File: IpRestoIpoptNLP.hpp Project: imzye/coin
 /** Lower bounds on x */
 virtual SmartPtr<const Vector> x_L() const
 {
     return GetRawPtr(x_L_);
 }
Code example #21
0
  bool
  RestoRestorationPhase::PerformRestoration()
  {
    DBG_START_METH("RestoRestorationPhase::PerformRestoration",
                   dbg_verbosity);
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "Performing second level restoration phase for current constriant violation %8.2e\n", IpCq().curr_constraint_violation());

    DBG_ASSERT(IpCq().curr_constraint_violation()>0.);

    // Get a grip on the restoration phase NLP and obtain the pointers
    // to the original NLP data
    SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*> (&IpNLP());
    DBG_ASSERT(dynamic_cast<RestoIpoptNLP*> (&IpNLP()));
    SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP());
    DBG_ASSERT(dynamic_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP()));

    // Get the current point and create a new vector for the result
    SmartPtr<const CompoundVector> Ccurr_x =
      static_cast<const CompoundVector*> (GetRawPtr(IpData().curr()->x()));
    DBG_ASSERT(dynamic_cast<const CompoundVector*> (GetRawPtr(IpData().curr()->x())));
    SmartPtr<const CompoundVector> Ccurr_s = 
      static_cast<const CompoundVector*> (GetRawPtr(IpData().curr()->s()));
    DBG_ASSERT(dynamic_cast<const CompoundVector*> (GetRawPtr(IpData().curr()->s())));
    DBG_ASSERT(Ccurr_s->NComps() == 1);
    SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
    SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*> (GetRawPtr(new_x));

    // The x values remain unchanged
    SmartPtr<Vector> x = Cnew_x->GetCompNonConst(0);
    x->Copy(*Ccurr_x->GetComp(0));

    // ToDo in free mu mode - what to do here?
    Number mu = IpData().curr_mu();

    // Compute the initial values for the n and p variables for the
    // equality constraints
    Number rho = resto_ip_nlp->Rho();
    SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
    SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
    SmartPtr<const Vector> cvec = orig_ip_nlp->c(*Ccurr_x->GetComp(0));
    SmartPtr<Vector> a = nc->MakeNew();
    SmartPtr<Vector> b = nc->MakeNew();
    a->Set(mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(mu/(2.*rho));
    solve_quadratic(*a, *b, *nc);
    pc->Copy(*cvec);
    pc->Axpy(1., *nc);
    DBG_PRINT_VECTOR(2, "nc", *nc);
    DBG_PRINT_VECTOR(2, "pc", *pc);

    // initial values for the n and p variables for the inequality
    // constraints
    SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
    SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
    SmartPtr<Vector> dvec = pd->MakeNew();
    dvec->Copy(*orig_ip_nlp->d(*Ccurr_x->GetComp(0)));
    dvec->Axpy(-1., *Ccurr_s->GetComp(0));
    a = nd->MakeNew();
    b = nd->MakeNew();
    a->Set(mu/(2.*rho));
    a->Axpy(-0.5, *dvec);
    b->Copy(*dvec);
    b->Scal(mu/(2.*rho));
    solve_quadratic(*a, *b, *nd);
    pd->Copy(*dvec);
    pd->Axpy(1., *nd);
    DBG_PRINT_VECTOR(2, "nd", *nd);
    DBG_PRINT_VECTOR(2, "pd", *pd);

    // Now set the trial point to the solution of the restoration
    // phase; s and all multipliers remain unchanged
    SmartPtr<IteratesVector> new_trial = IpData().curr()->MakeNewContainer();
    new_trial->Set_x(*new_x);
    IpData().set_trial(new_trial);

    IpData().Append_info_string("R");

    return true;
  }
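The vectors a and b above encode the closed-form minimizer of rho*(n+p) - mu*(ln n + ln p) subject to p - n = c for the current constraint value c; assuming solve_quadratic(a, b, n) returns the positive root n = a + sqrt(a^2 + b) componentwise, the values computed here are

    n = (mu - rho*c)/(2*rho) + sqrt( ((mu - rho*c)/(2*rho))^2 + mu*c/(2*rho) ),
    p = c + n,

and the same formulas are applied with d(x) - s in place of c(x) for the inequality part.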
Code example #22
0
File: IpRestoIpoptNLP.hpp Project: imzye/coin
 /** Permutation matrix (x_L_ -> x) */
 virtual SmartPtr<const Matrix> Px_L() const
 {
     return GetRawPtr(Px_L_);
 }
Code example #23
0
File: ipopt_nlp.cpp Project: andrescodas/casadi
  bool IpoptUserClass::intermediate_callback(AlgorithmMode mode, Index iter, Number obj_value,
                                             Number inf_pr, Number inf_du,
                                             Number mu, Number d_norm,
                                             Number regularization_size,
                                             Number alpha_du, Number alpha_pr,
                                             Index ls_trials,
                                             const IpoptData* ip_data,
                                             IpoptCalculatedQuantities* ip_cq) {

    // Only do the callback every few iterations
    if (iter % solver_.callback_step_!=0) return true;

    /// Code copied from TNLPAdapter::FinalizeSolution
    /// See also: http://list.coin-or.org/pipermail/ipopt/2010-July/002078.html
    // http://list.coin-or.org/pipermail/ipopt/2010-April/001965.html

    bool full_callback = false;

#ifdef WITH_IPOPT_CALLBACK
    mem_->fstats.at("callback_prep").tic();
    OrigIpoptNLP* orignlp = dynamic_cast<OrigIpoptNLP*>(GetRawPtr(ip_cq->GetIpoptNLP()));
    if (!orignlp) return true;
    TNLPAdapter* tnlp_adapter = dynamic_cast<TNLPAdapter*>(GetRawPtr(orignlp->nlp()));
    if (!tnlp_adapter) return true;

    const Vector& x = *ip_data->curr()->x();
    const Vector& z_L = *ip_data->curr()->z_L();
    const Vector& z_U = *ip_data->curr()->z_U();
    const Vector& c = *ip_cq->curr_c();
    const Vector& d = *ip_cq->curr_d();
    const Vector& y_c = *ip_data->curr()->y_c();
    const Vector& y_d = *ip_data->curr()->y_d();

    std::fill_n(x_, n_, 0);
    std::fill_n(g_, m_, 0);
    std::fill_n(z_L_, n_, 0);
    std::fill_n(z_U_, n_, 0);
    std::fill_n(lambda_, m_, 0);

    tnlp_adapter->ResortX(x, x_);             // no further steps needed
    tnlp_adapter->ResortG(y_c, y_d, lambda_); // no further steps needed
    tnlp_adapter->ResortG(c, d, g_);
    // Copied from Ipopt source: To Ipopt, the equality constraints are presented with right
    // hand side zero, so we correct for the original right hand side.
    const Index* c_pos = tnlp_adapter->P_c_g_->ExpandedPosIndices();
    Index n_c_no_fixed = tnlp_adapter->P_c_g_->NCols();
    for (Index i=0; i<n_c_no_fixed; i++) {
      g_[c_pos[i]] += tnlp_adapter->c_rhs_[i];
    }

    tnlp_adapter->ResortBnds(z_L, z_L_, z_U, z_U_);
    // Copied from Ipopt source: Hopefully the following is correct to recover the bound
    // multipliers for fixed variables (sign ok?)
    if (tnlp_adapter->fixed_variable_treatment_==TNLPAdapter::MAKE_CONSTRAINT &&
        tnlp_adapter->n_x_fixed_>0) {
      const DenseVector* dy_c = static_cast<const DenseVector*>(&y_c);
      Index n_c_no_fixed = y_c.Dim() - tnlp_adapter->n_x_fixed_;
      if (!dy_c->IsHomogeneous()) {
        const Number* values = dy_c->Values();
        for (Index i=0; i<tnlp_adapter->n_x_fixed_; i++) {
          z_L_[tnlp_adapter->x_fixed_map_[i]] = Max(0., -values[n_c_no_fixed+i]);
          z_U_[tnlp_adapter->x_fixed_map_[i]] = Max(0., values[n_c_no_fixed+i]);
        }
      } else {
        double value = dy_c->Scalar();
        for (Index i=0; i<tnlp_adapter->n_x_fixed_; i++) {
          z_L_[tnlp_adapter->x_fixed_map_[i]] = Max(0., -value);
          z_U_[tnlp_adapter->x_fixed_map_[i]] = Max(0.,  value);
        }
      }
    }
    mem_->fstats.at("callback_prep").toc();
    full_callback = true;
#endif // WITH_IPOPT_CALLBACK

    return solver_.intermediate_callback(mem_, x_, z_L_, z_U_, g_, lambda_, obj_value, iter,
                                         inf_pr, inf_du, mu, d_norm, regularization_size,
                                         alpha_du, alpha_pr, ls_trials, full_callback);
  }
Code example #24
0
  bool
  NLPBoundsRemover::GetSpaces(SmartPtr<const VectorSpace>& x_space,
                              SmartPtr<const VectorSpace>& c_space,
                              SmartPtr<const VectorSpace>& d_space,
                              SmartPtr<const VectorSpace>& x_l_space,
                              SmartPtr<const MatrixSpace>& px_l_space,
                              SmartPtr<const VectorSpace>& x_u_space,
                              SmartPtr<const MatrixSpace>& px_u_space,
                              SmartPtr<const VectorSpace>& d_l_space,
                              SmartPtr<const MatrixSpace>& pd_l_space,
                              SmartPtr<const VectorSpace>& d_u_space,
                              SmartPtr<const MatrixSpace>& pd_u_space,
                              SmartPtr<const MatrixSpace>& Jac_c_space,
                              SmartPtr<const MatrixSpace>& Jac_d_space,
                              SmartPtr<const SymMatrixSpace>& Hess_lagrangian_space,
                              bool bHostInit)
  {
    DBG_START_METH("NLPBoundsRemover::GetSpaces", dbg_verbosity);
    SmartPtr<const VectorSpace> d_space_orig;
    SmartPtr<const VectorSpace> x_l_space_orig;
    SmartPtr<const MatrixSpace> px_l_space_orig;
    SmartPtr<const VectorSpace> x_u_space_orig;
    SmartPtr<const MatrixSpace> px_u_space_orig;
    SmartPtr<const VectorSpace> d_l_space_orig;
    SmartPtr<const MatrixSpace> pd_l_space_orig;
    SmartPtr<const VectorSpace> d_u_space_orig;
    SmartPtr<const MatrixSpace> pd_u_space_orig;
    SmartPtr<const MatrixSpace> Jac_d_space_orig;

    bool retval = nlp_->GetSpaces(x_space, c_space, d_space_orig,
                                  x_l_space_orig, px_l_space_orig,
                                  x_u_space_orig, px_u_space_orig,
                                  d_l_space_orig, pd_l_space_orig,
                                  d_u_space_orig, pd_u_space_orig,
                                  Jac_c_space, Jac_d_space_orig,
                                  Hess_lagrangian_space);
    if (!retval) {
      return retval;
    }
    // Keep a copy of the expansion matrices for the x bounds
    Px_l_orig_ = px_l_space_orig->MakeNew();
    Px_u_orig_ = px_u_space_orig->MakeNew();

    // create the new d_space
    Index total_dim = d_space_orig->Dim() + x_l_space_orig->Dim() +
                      x_u_space_orig->Dim();
    SmartPtr<CompoundVectorSpace> d_space_new =
      new CompoundVectorSpace(3, total_dim);
    d_space_new->SetCompSpace(0, *d_space_orig);
    d_space_new->SetCompSpace(1, *x_l_space_orig);
    d_space_new->SetCompSpace(2, *x_u_space_orig);
    d_space = GetRawPtr(d_space_new);

    // create the new (empty) x_l and x_u spaces, and also the
    // corresponding projection matrix spaces
    x_l_space = new DenseVectorSpace(0);
    x_u_space = new DenseVectorSpace(0);
    px_l_space = new ZeroMatrixSpace(x_space->Dim(),0);
    px_u_space = new ZeroMatrixSpace(x_space->Dim(),0);

    // create the new d_l and d_u vector spaces
    total_dim = d_l_space_orig->Dim() + x_l_space_orig->Dim();
    SmartPtr<CompoundVectorSpace> d_l_space_new =
      new CompoundVectorSpace(2, total_dim);
    d_l_space_new->SetCompSpace(0, *d_l_space_orig);
    d_l_space_new->SetCompSpace(1, *x_l_space_orig);
    d_l_space = GetRawPtr(d_l_space_new);
    total_dim = d_u_space_orig->Dim() + x_u_space_orig->Dim();
    SmartPtr<CompoundVectorSpace> d_u_space_new =
      new CompoundVectorSpace(2, total_dim);
    d_u_space_new->SetCompSpace(0, *d_u_space_orig);
    d_u_space_new->SetCompSpace(1, *x_u_space_orig);
    d_u_space = GetRawPtr(d_u_space_new);

    // create the new d_l and d_u projection matrix spaces
    Index total_rows = d_space_orig->Dim() + x_l_space_orig->Dim() +
                       x_u_space_orig->Dim();
    Index total_cols = d_l_space_orig->Dim() + x_l_space_orig->Dim();
    SmartPtr<CompoundMatrixSpace> pd_l_space_new =
      new CompoundMatrixSpace(3, 2, total_rows, total_cols);
    pd_l_space_new->SetBlockRows(0, d_space_orig->Dim());
    pd_l_space_new->SetBlockRows(1, x_l_space_orig->Dim());
    pd_l_space_new->SetBlockRows(2, x_u_space_orig->Dim());
    pd_l_space_new->SetBlockCols(0, d_l_space_orig->Dim());
    pd_l_space_new->SetBlockCols(1, x_l_space_orig->Dim());
    pd_l_space_new->SetCompSpace(0, 0, *pd_l_space_orig, true);
    SmartPtr<const MatrixSpace> identity_space =
      new IdentityMatrixSpace(x_l_space_orig->Dim());
    pd_l_space_new->SetCompSpace(1, 1, *identity_space, true);
    pd_l_space = GetRawPtr(pd_l_space_new);

    total_cols = d_u_space_orig->Dim() + x_u_space_orig->Dim();
    SmartPtr<CompoundMatrixSpace> pd_u_space_new =
      new CompoundMatrixSpace(3, 2, total_rows, total_cols);
    pd_u_space_new->SetBlockRows(0, d_space_orig->Dim());
    pd_u_space_new->SetBlockRows(1, x_l_space_orig->Dim());
    pd_u_space_new->SetBlockRows(2, x_u_space_orig->Dim());
    pd_u_space_new->SetBlockCols(0, d_u_space_orig->Dim());
    pd_u_space_new->SetBlockCols(1, x_u_space_orig->Dim());
    pd_u_space_new->SetCompSpace(0, 0, *pd_u_space_orig, true);
    identity_space = new IdentityMatrixSpace(x_u_space_orig->Dim());
    pd_u_space_new->SetCompSpace(2, 1, *identity_space, true);
    pd_u_space = GetRawPtr(pd_u_space_new);

    // Jacobian for inequalities matrix space
    total_rows = d_space_orig->Dim() + x_l_space_orig->Dim() +
                 x_u_space_orig->Dim();
    total_cols = x_space->Dim();
    SmartPtr<CompoundMatrixSpace> Jac_d_space_new =
      new CompoundMatrixSpace(3, 1, total_rows, total_cols);
    Jac_d_space_new->SetBlockRows(0, d_space_orig->Dim());
    Jac_d_space_new->SetBlockRows(1, x_l_space_orig->Dim());
    Jac_d_space_new->SetBlockRows(2, x_u_space_orig->Dim());
    Jac_d_space_new->SetBlockCols(0, x_space->Dim());
    Jac_d_space_new->SetCompSpace(0, 0, *Jac_d_space_orig);
    SmartPtr<MatrixSpace> trans_px_l_space_orig =
      new TransposeMatrixSpace(GetRawPtr(px_l_space_orig));
    Jac_d_space_new->SetCompSpace(1, 0, *trans_px_l_space_orig, true);
    SmartPtr<MatrixSpace> trans_px_u_space_orig =
      new TransposeMatrixSpace(GetRawPtr(px_u_space_orig));
    Jac_d_space_new->SetCompSpace(2, 0, *trans_px_u_space_orig, true);
    Jac_d_space = GetRawPtr(Jac_d_space_new);

    // We keep the original d_space around in order to be able to do
    // the sanity check later
    d_space_orig_ = d_space_orig;

    return true;
  }
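Structurally, the reformulation set up here turns the variable bounds into extra inequality constraints (a sketch, assuming Px_l and Px_u are the usual expansion matrices for the bounded components of x):

    d_new(x) = ( d(x),  Px_l^T x,  Px_u^T x ),
    d_L <= d(x) <= d_U,    x_L <= Px_l^T x,    Px_u^T x <= x_U,

so the new inequality Jacobian gets the three row blocks J_d, Px_l^T and Px_u^T assembled in Jac_d_space_new, while the x_l/x_u spaces of the transformed NLP become empty.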