bool
  NLPBoundsRemover::GetBoundsInformation(const Matrix& Px_L,
                                         Vector& x_L,
                                         const Matrix& Px_U,
                                         Vector& x_U,
                                         const Matrix& Pd_L,
                                         Vector& d_L,
                                         const Matrix& Pd_U,
                                         Vector& d_U)
  {
    const CompoundMatrix* comp_pd_l =
      static_cast<const CompoundMatrix*>(&Pd_L);
    DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_L));
    SmartPtr<const Matrix> pd_l_orig = comp_pd_l->GetComp(0,0);

    const CompoundMatrix* comp_pd_u =
      static_cast<const CompoundMatrix*>(&Pd_U);
    DBG_ASSERT(dynamic_cast<const CompoundMatrix*>(&Pd_U));
    SmartPtr<const Matrix> pd_u_orig = comp_pd_u->GetComp(0,0);

    CompoundVector* comp_d_l = static_cast<CompoundVector*>(&d_L);
    DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_L));
    SmartPtr<Vector> d_l_orig = comp_d_l->GetCompNonConst(0);
    SmartPtr<Vector> x_l_orig = comp_d_l->GetCompNonConst(1);

    CompoundVector* comp_d_u = static_cast<CompoundVector*>(&d_U);
    DBG_ASSERT(dynamic_cast<CompoundVector*>(&d_U));
    SmartPtr<Vector> d_u_orig = comp_d_u->GetCompNonConst(0);
    SmartPtr<Vector> x_u_orig = comp_d_u->GetCompNonConst(1);

    // Here we do a sanity check to make sure that no inequality
    // constraint has two non-infinite bounds.  Applying Pd_L and Pd_U
    // to all-ones vectors marks, for each inequality, whether it has a
    // lower and an upper bound; every entry of the sum must be exactly 1.
    if (d_space_orig_->Dim()>0 && !allow_twosided_inequalities_) {
      SmartPtr<Vector> d = d_space_orig_->MakeNew();
      SmartPtr<Vector> tmp = d_l_orig->MakeNew();
      tmp->Set(1.);
      pd_l_orig->MultVector(1., *tmp, 0., *d);
      tmp = d_u_orig->MakeNew();
      tmp->Set(1.);
      pd_u_orig->MultVector(1., *tmp, 1., *d);
      Number dmax = d->Amax();
      ASSERT_EXCEPTION(dmax==1., INVALID_NLP, "In NLPBoundsRemover, an inequality with both lower and upper bounds was detected.");
      Number dmin = d->Min();
      ASSERT_EXCEPTION(dmin==1., INVALID_NLP, "In NLPBoundsRemover, an inequality without bounds was detected.");
    }

    bool retval =
      nlp_->GetBoundsInformation(*Px_l_orig_, *x_l_orig, *Px_u_orig_,
                                 *x_u_orig, *pd_l_orig, *d_l_orig,
                                 *pd_u_orig, *d_u_orig);
    return retval;
  }
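
A minimal sketch of the indicator-sum idea behind the sanity check above, using plain arrays in place of Ipopt's Matrix/Vector classes (exactly_one_bound_each is a hypothetical helper, not part of Ipopt). Applying Pd_L and Pd_U to all-ones vectors marks which inequalities carry a lower resp. upper bound; the check requires the marks to sum to exactly 1 in every component.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for the Amax()/Min() test above: the sum of the
// bound indicators must be exactly 1 for every inequality, ruling out
// both two-sided rows (sum 2) and unbounded rows (sum 0).
bool exactly_one_bound_each(const std::vector<int>& has_lower,
                            const std::vector<int>& has_upper)
{
  if (has_lower.size() != has_upper.size()) {
    return false;
  }
  for (std::size_t i = 0; i < has_lower.size(); ++i) {
    if (has_lower[i] + has_upper[i] != 1) {
      return false;
    }
  }
  return true;
}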
  void GradientScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> p_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    const Matrix& Px_L, const Vector& x_L,
    const Matrix& Px_U, const Vector& x_U,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x = x_space->MakeNew();
    SmartPtr<Vector> p = p_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                                GetRawPtr(p), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
    }

    //
    // Calculate grad_f scaling
    //
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    if (nlp_->Eval_grad_f(*x, *p, *grad_f)) {
      double max_grad_f = grad_f->Amax();
      df = 1.;
      if (scaling_obj_target_gradient_ == 0.) {
        if (max_grad_f > scaling_max_gradient_) {
          df = scaling_max_gradient_ / max_grad_f;
        }
      }
      else {
        if (max_grad_f == 0.) {
          Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                         "Gradient of objective function is zero at starting point.  Cannot determine scaling factor based on scaling_obj_target_gradient option.\n");
        }
        else {
          df = scaling_obj_target_gradient_ / max_grad_f;
        }
      }
      df = Max(df, scaling_min_value_);
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
      df = 1.;
    }
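    // In effect: with no target gradient, df = max(scaling_min_value_,
    // min(1, scaling_max_gradient_/||grad_f||_inf)); with a target,
    // df = max(scaling_min_value_, scaling_obj_target_gradient_/||grad_f||_inf).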
    //
    // No x scaling
    //
    dx = NULL;

    dc = NULL;
    if (c_space->Dim()>0) {
      //
      // Calculate c scaling
      //
      SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
      if (nlp_->Eval_jac_c(*x, *p, *jac_c)) {
        dc = c_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dc->Set(dbl_min);
        jac_c->ComputeRowAMax(*dc, false);
        Number arow_max = dc->Amax();
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dc->ElementWiseReciprocal();
            dc->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dc->MakeNew();
            dummy->Set(1.);
            dc->ElementWiseMin(*dummy);
          }
          else {
            dc = NULL;
          }
        }
        else {
          dc->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dc) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dc->MakeNew();
          tmp->Set(scaling_min_value_);
          dc->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of equality constraints at user provided starting point.\n  No scaling factors for equality constraints computed!\n");
      }
    }
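    // In effect: when the largest entry of the equality Jacobian exceeds
    // scaling_max_gradient_, each row i is scaled by
    // min(1, scaling_max_gradient_ / max_j |(jac_c)_{ij}|), floored at
    // scaling_min_value_; the inequality block below mirrors this for jac_d.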

    dd = NULL;
    if (d_space->Dim()>0) {
      //
      // Calculate d scaling
      //
      SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
      if (nlp_->Eval_jac_d(*x, *p, *jac_d)) {
        dd = d_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dd->Set(dbl_min);
        jac_d->ComputeRowAMax(*dd, false);
        Number arow_max = dd->Amax();
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dd->ElementWiseReciprocal();
            dd->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dd->MakeNew();
            dummy->Set(1.);
            dd->ElementWiseMin(*dummy);
          }
          else {
            dd = NULL;
          }
        }
        else {
          dd->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dd) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dd->MakeNew();
          tmp->Set(scaling_min_value_);
          dd->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of inequality constraints at user provided starting point.\n  No scaling factors for inequality constraints computed!\n");
      }
    }
  }
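
The row-scaling rule shared by the dc and dd blocks above condenses to the following standalone sketch of the default branch (scaling_constr_target_gradient_ <= 0). constraint_row_scaling is a hypothetical helper using plain std::vector instead of Ipopt's Vector/Matrix classes; it assumes row_amax[i] holds max_j |J_ij| as produced by ComputeRowAMax.

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical condensed form of the dc/dd computation: scale constraint
// rows down so no Jacobian entry exceeds max_grad, never scaling up
// (factor capped at 1) and never below min_value.  An empty result means
// "no scaling needed", like dc = NULL above.
std::vector<double> constraint_row_scaling(const std::vector<double>& row_amax,
                                           double max_grad, double min_value)
{
  const double overall = row_amax.empty() ? 0.
    : *std::max_element(row_amax.begin(), row_amax.end());
  if (overall <= max_grad) {
    return std::vector<double>();  // nothing exceeds the cap
  }
  std::vector<double> scale(row_amax.size(), 1.);
  for (std::size_t i = 0; i < row_amax.size(); ++i) {
    if (row_amax[i] > max_grad) {
      scale[i] = max_grad / row_amax[i];
    }
    if (min_value > 0.) {
      scale[i] = std::max(scale[i], min_value);
    }
  }
  return scale;
}
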
  char CGPenaltyLSAcceptor::UpdatePenaltyParameter()
  {
    DBG_START_METH("CGPenaltyLSAcceptor::UpdatePenaltyParameter",
                   dbg_verbosity);
    char info_alpha_primal_char = 'n';
    // We use the new infeasibility here...
    Number trial_inf = IpCq().trial_primal_infeasibility(NORM_2);
    Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                   "trial infeasibility = %8.2\n", trial_inf);
    if (curr_eta_<0.) {
      // We need to initialize the eta tolerance
      curr_eta_ = Max(eta_min_, Min(gamma_tilde_,
                                    gamma_hat_*IpCq().curr_nlp_error()));
    }
    // Check if the penalty parameter is to be increased
    Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                   "Starting tests for penalty parameter update:\n");
    bool increase = (trial_inf >= penalty_update_infeasibility_tol_);
    if (!increase) {
      info_alpha_primal_char='i';
    }
    if (increase) {
      Number max_step = Max(CGPenData().delta_cgpen()->x()->Amax(),
                            CGPenData().delta_cgpen()->s()->Amax());
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                     "Max norm of step = %8.2\n", max_step);
      increase = (max_step <= curr_eta_);
      if (!increase) {
        info_alpha_primal_char='d';
      }
    }
    // Lifeng: Should we use the new complementarity here?  If so, I
    // have to restructure BacktrackingLineSearch
    Number mu = IpData().curr_mu();
    if (increase) {
      Number min_compl = mu;
      Number max_compl = mu;
      if (IpNLP().x_L()->Dim()>0) {
        SmartPtr<const Vector> compl_x_L = IpCq().curr_compl_x_L();
        min_compl = Min(min_compl, compl_x_L->Min());
        max_compl = Max(max_compl, compl_x_L->Max());
      }
      if (IpNLP().x_U()->Dim()>0) {
        SmartPtr<const Vector> compl_x_U = IpCq().curr_compl_x_U();
        min_compl = Min(min_compl, compl_x_U->Min());
        max_compl = Max(max_compl, compl_x_U->Max());
      }
      if (IpNLP().d_L()->Dim()>0) {
        SmartPtr<const Vector> compl_s_L = IpCq().curr_compl_s_L();
        min_compl = Min(min_compl, compl_s_L->Min());
        max_compl = Max(max_compl, compl_s_L->Max());
      }
      if (IpNLP().d_U()->Dim()>0) {
        SmartPtr<const Vector> compl_s_U = IpCq().curr_compl_s_U();
        min_compl = Min(min_compl, compl_s_U->Min());
        max_compl = Max(max_compl, compl_s_U->Max());
      }
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                     "Minimal compl = %8.2\n", min_compl);
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                     "Maximal compl = %8.2\n", max_compl);
      increase = (min_compl >= mu*penalty_update_compl_tol_ &&
                  max_compl <= mu/penalty_update_compl_tol_);
      if (!increase) {
        info_alpha_primal_char='c';
      }
    }
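    // The test above accepts an increase only if every complementarity
    // product stays within [mu*penalty_update_compl_tol_,
    // mu/penalty_update_compl_tol_], i.e. the iterate remains
    // sufficiently close to the central path.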
    // Lifeng: Here I'm using the information from the current step
    // and the current infeasibility
    if (increase) {
      SmartPtr<Vector> vec = IpData().curr()->y_c()->MakeNewCopy();
      vec->AddTwoVectors(1., *CGPenData().delta_cgpen()->y_c(),
                         -1./CGPenCq().curr_cg_pert_fact(), *IpCq().curr_c(),
                         1.);
      Number omega_test = vec->Amax();
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                     "omega_test for c = %8.2\n", omega_test);
      increase = (omega_test < curr_eta_);
      if (increase) {
        SmartPtr<Vector> vec = IpData().curr()->y_d()->MakeNewCopy();
        vec->AddTwoVectors(1., *IpData().delta()->y_d(),
                           -1./CGPenCq().curr_cg_pert_fact(), *IpCq().curr_d_minus_s(),
                           1.);
        omega_test = vec->Amax();
        Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                       "omega_test for d = %8.2\n", omega_test);
        increase = (omega_test < curr_eta_);
      }
      if (!increase) {
        info_alpha_primal_char='m';
      }
    }
    if (increase) {
      // Ok, now we should increase the penalty parameter
      counter_first_type_penalty_updates_++;
      // Update the eta tolerance
      curr_eta_ = Max(eta_min_, curr_eta_/2.);
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
                     "Updating eta to = %8.2\n", curr_eta_);
      Number penalty = CGPenData().curr_kkt_penalty();
      Number y_full_step_max;
      SmartPtr<Vector> vec = IpData().curr()->y_c()->MakeNew();
      vec->AddTwoVectors(1., *IpData().curr()->y_c(),
                         1., *CGPenData().delta_cgpen()->y_c(), 0.);
      y_full_step_max = vec->Amax();
      vec = IpData().curr()->y_d()->MakeNew();
      vec->AddTwoVectors(1., *IpData().curr()->y_d(),
                         1., *CGPenData().delta_cgpen()->y_d(), 0.);
      y_full_step_max = Max(y_full_step_max, vec->Amax());
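      // y_full_step_max now holds the largest multiplier a full step would
      // produce; the updates below choose the new penalty to dominate it.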
      if (IpCq().curr_primal_infeasibility(NORM_2) >= epsilon_c_) {
        penalty = Max(chi_hat_*penalty, y_full_step_max + 1.);
        info_alpha_primal_char = 'l';
      }
      else {
        penalty = Max(chi_tilde_*penalty, chi_cup_*y_full_step_max);
        info_alpha_primal_char = 's';
      }
      if (penalty > penalty_max_) {
        THROW_EXCEPTION(IpoptException, "Penalty parameter becomes too large.");
      }
      CGPenData().Set_kkt_penalty(penalty);
      if (CGPenData().NeverTryPureNewton()) {
        CGPenData().Set_penalty(penalty);
      }
    }

    // Second heuristic update for penalty parameters
    if (IpData().curr()->y_c()->Dim() + IpData().curr()->y_d()->Dim() > 0 &&
        !never_use_piecewise_penalty_ls_) {
      Number scaled_y_Amax = CGPenCq().curr_scaled_y_Amax();
      if (scaled_y_Amax <= 1e4 ||
          counter_second_type_penalty_updates_ < 5) {
        Number result;
        SmartPtr<const Vector> ty_c = IpData().curr()->y_c();
        SmartPtr<const Vector> ty_d = IpData().curr()->y_d();
        SmartPtr<const Vector> dy_c = IpData().delta()->y_c();
        SmartPtr<const Vector> dy_d = IpData().delta()->y_d();
        Number curr_inf = IpCq().curr_primal_infeasibility(NORM_2);
        result = dy_c->Dot(*IpCq().curr_c()) + dy_d->Dot(*IpCq().curr_d_minus_s());
        if (!CGPenData().HaveCgFastDeltas()) {
          result += ty_c->Dot(*IpCq().curr_c()) + ty_d->Dot(*IpCq().curr_d_minus_s());
        }
        Number k_pen = CGPenData().curr_kkt_penalty();
        if (result > 0.5*k_pen*curr_inf || result < -0.5*k_pen*curr_inf) {
          Number nrm2_y = CGPenCq().curr_added_y_nrm2();
          result = 5.*nrm2_y;
          CGPenData().Set_kkt_penalty(result);
          if (CGPenData().NeverTryPureNewton()) {
            CGPenData().Set_penalty(result);
          }
          if (scaled_y_Amax > 1e4) {
            counter_second_type_penalty_updates_++;
          }
        }
      }
    }
    return info_alpha_primal_char;
  }
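
The first-type update above reduces to the following rule for the new KKT penalty. A minimal sketch with hypothetical names, assuming the parameters play the roles of chi_hat_, chi_tilde_, chi_cup_ and epsilon_c_ in the member function above.

#include <algorithm>

// Hypothetical condensed form of the first-type penalty update: while the
// primal infeasibility is still large, force the penalty above the
// full-step multipliers (plus a margin of 1); once it is small, grow the
// penalty more conservatively.
double updated_kkt_penalty(double penalty, double y_full_step_max,
                           double primal_inf, double epsilon_c,
                           double chi_hat, double chi_tilde, double chi_cup)
{
  if (primal_inf >= epsilon_c) {
    return std::max(chi_hat * penalty, y_full_step_max + 1.);
  }
  return std::max(chi_tilde * penalty, chi_cup * y_full_step_max);
}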