Example #1
bool InexactSearchDirCalculator::InitializeImpl(
   const OptionsList& options,
   const std::string& prefix
   )
{
   options.GetNumericValue("local_inf_Ac_tol", local_inf_Ac_tol_, prefix);
   Index enum_int;
   options.GetEnumValue("inexact_step_decomposition", enum_int, prefix);
   decomposition_type_ = DecompositionTypeEnum(enum_int);

   bool compute_normal = false;
   switch( decomposition_type_ )
   {
      case ALWAYS:
         compute_normal = true;
         break;
      case ADAPTIVE:
      case SWITCH_ONCE:
         compute_normal = false;
         break;
   }

   InexData().set_compute_normal(compute_normal);
   InexData().set_next_compute_normal(compute_normal);

   bool retval = inexact_pd_solver_->Initialize(Jnlst(), IpNLP(), IpData(), IpCq(), options, prefix);
   if( !retval )
   {
      return false;
   }
   return normal_step_calculator_->Initialize(Jnlst(), IpNLP(), IpData(), IpCq(), options, prefix);
}
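The switch above only decides the initial value of the compute_normal flag: the normal step is computed from the very first iteration only for the ALWAYS strategy, while the adaptive strategies activate it later from the line search. A minimal standalone sketch of that mapping, with a hypothetical enum name (this is not Ipopt code):

enum StepDecomposition { DECOMP_ALWAYS, DECOMP_ADAPTIVE, DECOMP_SWITCH_ONCE };

// Only the ALWAYS strategy starts with the step decomposition active;
// ADAPTIVE and SWITCH_ONCE turn it on later, driven by the line search.
inline bool InitialComputeNormal(StepDecomposition type)
{
   return type == DECOMP_ALWAYS;
}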
Example #2
char InexactLSAcceptor::UpdateForNextIteration(
   Number alpha_primal_test
   )
{
   // If normal step has not been computed and alpha is too small,
   // set next_compute_normal to true.
   bool compute_normal = InexData().compute_normal();
   if( !compute_normal && alpha_primal_test < inexact_decomposition_activate_tol_ )
   {
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
         "Setting next_compute_normal to 1 since %8.2e < inexact_decomposition_activate_tol\n", alpha_primal_test);
      InexData().set_next_compute_normal(true);
   }
   // If normal step has been computed and acceptable alpha is large,
   // set next_compute_normal to false
   if( compute_normal && alpha_primal_test > inexact_decomposition_inactivate_tol_ )
   {
      Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
         "Setting next_compute_normal to 0 since %8.2e > inexact_decomposition_inactivate_tol\n", alpha_primal_test);
      InexData().set_next_compute_normal(false);
   }

   char info_alpha_primal_char = 'k';
   // Record in the info string whether the penalty parameters were changed
   if( last_nu_ != nu_ )
   {
      info_alpha_primal_char = 'n';
      char snu[40];
      sprintf(snu, " nu=%8.2e", nu_);
      IpData().Append_info_string(snu);
   }
   if( flexible_penalty_function_ && last_nu_low_ != nu_low_ )
   {
      char snu[40];
      sprintf(snu, " nl=%8.2e", nu_low_);
      IpData().Append_info_string(snu);
      if( info_alpha_primal_char == 'k' )
      {
         info_alpha_primal_char = 'l';
      }
      else
      {
         info_alpha_primal_char = 'b';
      }
   }

   if( accepted_by_low_only_ )
   {
      info_alpha_primal_char = (char) toupper(info_alpha_primal_char);
   }

   if( alpha_primal_test == 1. && watchdog_pred_ == 1e300 )
   {
      InexData().set_full_step_accepted(true);
   }
   return info_alpha_primal_char;
}
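The single character returned by UpdateForNextIteration encodes what happened to the penalty parameters: 'k' means nothing changed, 'n' that nu_ was increased, 'l' that only nu_low_ was updated, 'b' that both were, and the uppercase variant marks a step accepted by the lower penalty parameter only. A standalone sketch of that encoding, with hypothetical argument names (not Ipopt code):

#include <cctype>

inline char ComposeInfoChar(bool nu_changed, bool nu_low_changed, bool accepted_by_low_only)
{
   char c = 'k';
   if( nu_changed )
      c = 'n';
   if( nu_low_changed )
      c = (c == 'k') ? 'l' : 'b';
   if( accepted_by_low_only )
      c = (char) std::toupper((unsigned char) c);   // uppercase: accepted by nu_low_ only
   return c;
}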
Example #3
  void InexactLSAcceptor::Reset()
  {
    DBG_START_FUN("InexactLSAcceptor::Reset", dbg_verbosity);

    nu_ = nu_init_;
    if (flexible_penalty_function_) {
      nu_low_ = nu_low_init_;
    }
    InexData().set_curr_nu(nu_);
  }
  ESymSolverStatus IterativePardisoSolverInterface::Solve(const Index* ia,
      const Index* ja,
      Index nrhs,
      double *rhs_vals)
  {
    DBG_START_METH("IterativePardisoSolverInterface::Solve",dbg_verbosity);

    DBG_ASSERT(nrhs==1);

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().Start();
    }
    // Call Pardiso to do the solve for the given right-hand sides
    ipfint PHASE = 33;
    ipfint N = dim_;
    ipfint PERM;   // This should not be accessed by Pardiso
    ipfint NRHS = nrhs;
    double* X = new double[nrhs*dim_];
    double* ORIG_RHS = new double[nrhs*dim_];
    ipfint ERROR;

    // Initialize solution with zero and save right hand side
    for (int i = 0; i < N; i++) {
      X[i] = 0;
      ORIG_RHS[i] = rhs_vals[i];
    }

    // Dump matrix to file if requested
    Index iter_count = 0;
    if (HaveIpData()) {
      iter_count = IpData().iter_count();
    }
    write_iajaa_matrix (N, ia, ja, a_, rhs_vals, iter_count, debug_cnt_);

    IterativeSolverTerminationTester* tester;

    int attempts = 0;
    const int max_attempts = pardiso_max_droptol_corrections_+1;

    bool is_normal = false;
    if (IsNull(InexData().normal_x()) && InexData().compute_normal()) {
      tester = GetRawPtr(normal_tester_);
      is_normal = true;
    }
    else {
      tester = GetRawPtr(pd_tester_);
    }
    global_tester_ptr_ = tester;

    while (attempts<max_attempts) {
      bool retval = tester->InitializeSolve();
      ASSERT_EXCEPTION(retval, INTERNAL_ABORT, "tester->InitializeSolve(); returned false");

      for (int i = 0; i < N; i++) {
        rhs_vals[i] = ORIG_RHS[i];
      }

      DPARM_[ 8] = 25; // maximum number of non-improvement steps in SQMR iteration
      F77_FUNC(pardiso,PARDISO)(PT_, &MAXFCT_, &MNUM_, &MTYPE_,
                                &PHASE, &N, a_, ia, ja, &PERM,
                                &NRHS, IPARM_, &MSGLVL_, rhs_vals, X,
                                &ERROR, DPARM_);

      if (ERROR <= -100 && ERROR >= -110) {
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                       "Iterative solver in Pardiso did not converge (ERROR = %d)\n", ERROR);
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                       "  Decreasing drop tolerances from DPARM_[ 4] = %e and DPARM_[ 5] = %e ", DPARM_[ 4], DPARM_[ 5]);
        if (is_normal) {
          Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                         "(normal step)\n");
        }
        else {
          Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                         "(PD step)\n");
        }
        PHASE = 23;
        DPARM_[ 4] *= decr_factor_;
        DPARM_[ 5] *= decr_factor_;
        Jnlst().Printf(J_WARNING, J_LINEAR_ALGEBRA,
                       "                               to DPARM_[ 4] = %e and DPARM_[ 5] = %e\n", DPARM_[ 4], DPARM_[ 5]);
        // Copy solution back to y to get initial values for the next iteration
        attempts++;
        ERROR = 0;
      }
      else  {
        attempts = max_attempts;
        Index iterations_used = tester->GetSolverIterations();
        if (is_normal) {
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "Number of iterations in Pardiso iterative solver for normal step = %d.\n", iterations_used);
        }
        else {
          Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                         "Number of iterations in Pardiso iterative solver for PD step = %d.\n", iterations_used);
        }
      }
      tester->Clear();
    }

    if (is_normal) {
      if (DPARM_[4] < normal_pardiso_iter_dropping_factor_) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Increasing drop tolerances from DPARM_[ 4] = %e and DPARM_[ 5] = %e (normal step\n", DPARM_[ 4], DPARM_[ 5]);
      }
      normal_pardiso_iter_dropping_factor_used_ =
        Min(DPARM_[4]/decr_factor_, normal_pardiso_iter_dropping_factor_);
      normal_pardiso_iter_dropping_schur_used_ =
        Min(DPARM_[5]/decr_factor_, normal_pardiso_iter_dropping_schur_);
      if (DPARM_[4] < normal_pardiso_iter_dropping_factor_) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "                             to DPARM_[ 4] = %e and DPARM_[ 5] = %e for next iteration.\n", normal_pardiso_iter_dropping_factor_used_, normal_pardiso_iter_dropping_schur_used_);
      }
    }
    else {
      if (DPARM_[4] < pardiso_iter_dropping_factor_) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Increasing drop tolerances from DPARM_[ 4] = %e and DPARM_[ 5] = %e (PD step\n", DPARM_[ 4], DPARM_[ 5]);
      }
      pardiso_iter_dropping_factor_used_ =
        Min(DPARM_[4]/decr_factor_, pardiso_iter_dropping_factor_);
      pardiso_iter_dropping_schur_used_ =
        Min(DPARM_[5]/decr_factor_, pardiso_iter_dropping_schur_);
      if (DPARM_[4] < pardiso_iter_dropping_factor_) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "                             to DPARM_[ 4] = %e and DPARM_[ 5] = %e for next iteration.\n", pardiso_iter_dropping_factor_used_, pardiso_iter_dropping_schur_used_);
      }
    }

    delete [] X;
    delete [] ORIG_RHS;

    if (IPARM_[6] != 0) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "Number of iterative refinement steps = %d.\n", IPARM_[6]);
      if (HaveIpData()) {
        IpData().Append_info_string("Pi");
      }
    }

    if (HaveIpData()) {
      IpData().TimingStats().LinearSystemBackSolve().End();
    }
    if (ERROR!=0 ) {
      Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                     "Error in Pardiso during solve phase.  ERROR = %d.\n", ERROR);
      return SYMSOLVER_FATAL_ERROR;
    }
    if (test_result_ == IterativeSolverTerminationTester::MODIFY_HESSIAN) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "Termination tester requests modification of Hessian\n");
      return SYMSOLVER_WRONG_INERTIA;
    }
#if 0
    // FRANK: look at this:
    if (test_result_ == IterativeSolverTerminationTester::CONTINUE) {
      if (InexData().compute_normal()) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Termination tester not satisfied!!! Pretend singular\n");
        return SYMSOLVER_SINGULAR;
      }
    }
#endif
    if (test_result_ == IterativeSolverTerminationTester::TEST_2_SATISFIED) {
      // Termination Test 2 is satisfied, set the step for the primal
      // iterates to zero
      Index nvars = IpData().curr()->x()->Dim() + IpData().curr()->s()->Dim();
      const Number zero = 0.;
      IpBlasDcopy(nvars, &zero, 0, rhs_vals, 1);
    }
    return SYMSOLVER_SUCCESS;
  }
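The core of Solve() above is a retry loop: if Pardiso's iterative solver fails to converge (ERROR between -100 and -110), the drop tolerances DPARM_[4] and DPARM_[5] are tightened by decr_factor_ and the solve is repeated, up to pardiso_max_droptol_corrections_ extra attempts. A standalone sketch of that pattern, with hypothetical names (not Ipopt code):

#include <functional>

// try_solve runs one factorization/solve with the given drop tolerances and
// returns true if the iterative solver converged.
inline bool SolveWithDropTolCorrections(
   const std::function<bool(double, double)>& try_solve,
   double droptol_factor,
   double droptol_schur,
   double decr_factor,
   int    max_corrections)
{
   for( int attempt = 0; attempt <= max_corrections; attempt++ )
   {
      if( try_solve(droptol_factor, droptol_schur) )
         return true;
      // Did not converge: tighten both drop tolerances and try again.
      droptol_factor *= decr_factor;
      droptol_schur  *= decr_factor;
   }
   return false;
}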
  ESymSolverStatus
  IterativePardisoSolverInterface::Factorization(const Index* ia,
      const Index* ja,
      bool check_NegEVals,
      Index numberOfNegEVals)
  {
    DBG_START_METH("IterativePardisoSolverInterface::Factorization",dbg_verbosity);

    // Call Pardiso to do the factorization
    ipfint PHASE;
    ipfint N = dim_;
    ipfint PERM;   // This should not be accessed by Pardiso
    ipfint NRHS = 0;
    double B;  // This should not be accessed by Pardiso in factorization phase
    double X;  // This should not be accessed by Pardiso in factorization phase
    ipfint ERROR;

    bool done = false;
    bool just_performed_symbolic_factorization = false;

    while (!done) {
      bool is_normal = false;
      if (IsNull(InexData().normal_x()) && InexData().compute_normal()) {
        is_normal = true;
      }
      if (!have_symbolic_factorization_) {
        if (HaveIpData()) {
          IpData().TimingStats().LinearSystemSymbolicFactorization().Start();
        }
        PHASE = 11;
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Calling Pardiso for symbolic factorization.\n");

        if (is_normal) {
          DPARM_[ 0] = normal_pardiso_max_iter_;
          DPARM_[ 1] = normal_pardiso_iter_relative_tol_;
          DPARM_[ 2] = normal_pardiso_iter_coarse_size_;
          DPARM_[ 3] = normal_pardiso_iter_max_levels_;
          DPARM_[ 4] = normal_pardiso_iter_dropping_factor_used_;
          DPARM_[ 5] = normal_pardiso_iter_dropping_schur_used_;
          DPARM_[ 6] = normal_pardiso_iter_max_row_fill_;
          DPARM_[ 7] = normal_pardiso_iter_inverse_norm_factor_;
        }
        else {
          DPARM_[ 0] = pardiso_max_iter_;
          DPARM_[ 1] = pardiso_iter_relative_tol_;
          DPARM_[ 2] = pardiso_iter_coarse_size_;
          DPARM_[ 3] = pardiso_iter_max_levels_;
          DPARM_[ 4] = pardiso_iter_dropping_factor_used_;
          DPARM_[ 5] = pardiso_iter_dropping_schur_used_;
          DPARM_[ 6] = pardiso_iter_max_row_fill_;
          DPARM_[ 7] = pardiso_iter_inverse_norm_factor_;
        }
        DPARM_[ 8] = 25; // maximum number of non-improvement steps

        F77_FUNC(pardiso,PARDISO)(PT_, &MAXFCT_, &MNUM_, &MTYPE_,
                                  &PHASE, &N, a_, ia, ja, &PERM,
                                  &NRHS, IPARM_, &MSGLVL_, &B, &X,
                                  &ERROR, DPARM_);
        if (HaveIpData()) {
          IpData().TimingStats().LinearSystemSymbolicFactorization().End();
        }
        if (ERROR==-7) {
          Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                         "Pardiso symbolic factorization returns ERROR = %d.  Matrix is singular.\n", ERROR);
          return SYMSOLVER_SINGULAR;
        }
        else if (ERROR!=0) {
          Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                         "Error in Pardiso during symbolic factorization phase.  ERROR = %d.\n", ERROR);
          return SYMSOLVER_FATAL_ERROR;
        }
        have_symbolic_factorization_ = true;
        just_performed_symbolic_factorization = true;

        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Memory in KB required for the symbolic factorization  = %d.\n", IPARM_[14]);
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Integer memory in KB required for the numerical factorization  = %d.\n", IPARM_[15]);
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Double  memory in KB required for the numerical factorization  = %d.\n", IPARM_[16]);
      }

      PHASE = 22;

      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().Start();
      }
      Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                     "Calling Pardiso for factorization.\n");
      // Dump matrix to file, and count number of solution steps.
      if (HaveIpData()) {
        if (IpData().iter_count() != debug_last_iter_)
          debug_cnt_ = 0;
        debug_last_iter_ = IpData().iter_count();
        debug_cnt_ ++;
      }
      else {
        debug_cnt_ = 0;
        debug_last_iter_ = 0;
      }

      if (is_normal) {
        DPARM_[ 0] = normal_pardiso_max_iter_;
        DPARM_[ 1] = normal_pardiso_iter_relative_tol_;
        DPARM_[ 2] = normal_pardiso_iter_coarse_size_;
        DPARM_[ 3] = normal_pardiso_iter_max_levels_;
        DPARM_[ 4] = normal_pardiso_iter_dropping_factor_used_;
        DPARM_[ 5] = normal_pardiso_iter_dropping_schur_used_;
        DPARM_[ 6] = normal_pardiso_iter_max_row_fill_;
        DPARM_[ 7] = normal_pardiso_iter_inverse_norm_factor_;
      }
      else {
        DPARM_[ 0] = pardiso_max_iter_;
        DPARM_[ 1] = pardiso_iter_relative_tol_;
        DPARM_[ 2] = pardiso_iter_coarse_size_;
        DPARM_[ 3] = pardiso_iter_max_levels_;
        DPARM_[ 4] = pardiso_iter_dropping_factor_used_;
        DPARM_[ 5] = pardiso_iter_dropping_schur_used_;
        DPARM_[ 6] = pardiso_iter_max_row_fill_;
        DPARM_[ 7] = pardiso_iter_inverse_norm_factor_;
      }
      DPARM_[ 8] = 25; // maximum number of non-improvement steps

      F77_FUNC(pardiso,PARDISO)(PT_, &MAXFCT_, &MNUM_, &MTYPE_,
                                &PHASE, &N, a_, ia, ja, &PERM,
                                &NRHS, IPARM_, &MSGLVL_, &B, &X,
                                &ERROR, DPARM_);
      if (HaveIpData()) {
        IpData().TimingStats().LinearSystemFactorization().End();
      }

      if (ERROR==-7) {
        Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
                       "Pardiso factorization returns ERROR = %d.  Matrix is singular.\n", ERROR);
        return SYMSOLVER_SINGULAR;
      }
      else if (ERROR==-4) {
        // I think this means that the matrix is singular
        // OLAF said that this will never happen (ToDo)
        return SYMSOLVER_SINGULAR;
      }
      else if (ERROR!=0 ) {
        Jnlst().Printf(J_ERROR, J_LINEAR_ALGEBRA,
                       "Error in Pardiso during factorization phase.  ERROR = %d.\n", ERROR);
        return SYMSOLVER_FATAL_ERROR;
      }

      negevals_ = Max(IPARM_[22], numberOfNegEVals);
      if (IPARM_[13] != 0) {
        Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                       "Number of perturbed pivots in factorization phase = %d.\n", IPARM_[13]);
        if (HaveIpData()) {
          IpData().Append_info_string("Pp");
        }
        done = true;
      }
      else {
        done = true;
      }
    }

    //  DBG_ASSERT(IPARM_[21]+IPARM_[22] == dim_);

    // Check whether the number of negative eigenvalues matches the requested
    // count
    if (skip_inertia_check_) numberOfNegEVals=negevals_;

    if (check_NegEVals && (numberOfNegEVals!=negevals_)) {
      Jnlst().Printf(J_DETAILED, J_LINEAR_ALGEBRA,
                     "Wrong inertia: required are %d, but we got %d.\n",
                     numberOfNegEVals, negevals_);
      return SYMSOLVER_WRONG_INERTIA;
    }

    return SYMSOLVER_SUCCESS;
  }
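After the factorization, the number of negative eigenvalues reported by Pardiso (IPARM_[22], stored in negevals_) is compared against the count the algorithm requested; a mismatch is reported as wrong inertia so that the caller can modify the Hessian. A standalone sketch of that final check, with hypothetical names (not Ipopt code):

enum SketchSolverStatus { SKETCH_SUCCESS, SKETCH_WRONG_INERTIA };

inline SketchSolverStatus CheckInertia(
   bool check_neg_evals,
   int  requested_neg_evals,   // numberOfNegEVals passed by the caller
   int  reported_neg_evals,    // negevals_ as recorded from the factorization
   bool skip_inertia_check)
{
   if( skip_inertia_check )
      requested_neg_evals = reported_neg_evals;   // accept whatever was reported
   if( check_neg_evals && requested_neg_evals != reported_neg_evals )
      return SKETCH_WRONG_INERTIA;
   return SKETCH_SUCCESS;
}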
Example #6
bool InexactSearchDirCalculator::ComputeSearchDirection()
{
   DBG_START_METH("InexactSearchDirCalculator::ComputeSearchDirection",
      dbg_verbosity);

   // First check if the iterates have converged to a locally
   // infeasible point
   Number curr_scaled_Ac_norm = InexCq().curr_scaled_Ac_norm();
   Jnlst().Printf(J_DETAILED, J_SOLVE_PD_SYSTEM, "curr_scaled_Ac_norm = %e\n", curr_scaled_Ac_norm);
   Number curr_inf = IpCq().curr_primal_infeasibility(NORM_2);
   // ToDo work on termination criteria
   if( curr_scaled_Ac_norm <= local_inf_Ac_tol_ && curr_inf > 1e-4 )
   {
      THROW_EXCEPTION(LOCALLY_INFEASIBLE, "The scaled norm of Ac satisfies the tolerance");
   }

   bool compute_normal = false;
   switch( decomposition_type_ )
   {
      case ALWAYS:
         compute_normal = true;
         break;
      case ADAPTIVE:
         compute_normal = InexData().next_compute_normal();
         break;
      case SWITCH_ONCE:
         compute_normal = InexData().next_compute_normal() || InexData().compute_normal();
         break;
   }

   SmartPtr<Vector> normal_x;
   SmartPtr<Vector> normal_s;
   bool retval;
   SmartPtr<IteratesVector> delta;
   SmartPtr<const IteratesVector> curr = IpData().curr();
   SmartPtr<IteratesVector> rhs;
   SmartPtr<Vector> tmp;

   // Now we set up the primal-dual system for computing the
   // tangential step and the search direction for the multipliers.
   // This is taken from IpPDSearchDirCal.cpp (rev 549).
   // We do not need entries for the variable bound multipliers

   // Upper part of right-hand-side vector is same for both systems
   rhs = curr->MakeNewContainer();
   tmp = curr->x()->MakeNew();
   tmp->AddOneVector(-1., *IpCq().curr_grad_lag_with_damping_x(), 0.);
   rhs->Set_x(*tmp);
   tmp = curr->s()->MakeNew();
   tmp->AddOneVector(-1., *IpCq().curr_grad_lag_with_damping_s(), 0.);
   rhs->Set_s(*tmp);
   tmp = curr->v_L()->MakeNew();
   tmp->AddOneVector(-1., *IpCq().curr_relaxed_compl_s_L(), 0.);
   rhs->Set_v_L(*tmp);
   tmp = curr->v_U()->MakeNew();
   tmp->AddOneVector(-1., *IpCq().curr_relaxed_compl_s_U(), 0.);
   rhs->Set_v_U(*tmp);

   // Loop through algorithms
   bool done = false;
   while( !done )
   {

      InexData().set_compute_normal(compute_normal);
      InexData().set_next_compute_normal(compute_normal);

      if( !compute_normal )
      {
         normal_x = NULL;
         normal_s = NULL;
      }
      else
      {
         retval = normal_step_calculator_->ComputeNormalStep(normal_x, normal_s);
         if( !retval )
         {
            return false;
         }
         // output
         if( Jnlst().ProduceOutput(J_VECTOR, J_SOLVE_PD_SYSTEM) )
         {
            Jnlst().Printf(J_VECTOR, J_SOLVE_PD_SYSTEM, "Normal step (without slack scaling):\n");
            normal_x->Print(Jnlst(), J_VECTOR, J_SOLVE_PD_SYSTEM, "normal_x");
            normal_s->Print(Jnlst(), J_VECTOR, J_SOLVE_PD_SYSTEM, "normal_s");
         }
      }

      // Lower part of right-hand-side vector is different for each system
      if( !compute_normal )
      {
         tmp = curr->y_c()->MakeNew();
         tmp->AddOneVector(-1., *IpCq().curr_c(), 0.);
         rhs->Set_y_c(*tmp);
         tmp = curr->y_d()->MakeNew();
         tmp->AddOneVector(-1., *IpCq().curr_d_minus_s(), 0.);
         rhs->Set_y_d(*tmp);
      }
      else
      {
         rhs->Set_y_c(*IpCq().curr_jac_c_times_vec(*normal_x));
         tmp = normal_s->MakeNew();
         tmp->AddTwoVectors(1., *IpCq().curr_jac_d_times_vec(*normal_x), -1., *normal_s, 0.);
         rhs->Set_y_d(*tmp);
      }

      InexData().set_normal_x(normal_x);
      InexData().set_normal_s(normal_s);

      delta = rhs->MakeNewIteratesVector();
      retval = inexact_pd_solver_->Solve(*rhs, *delta);

      // Determine if acceptable step has been computed
      if( !compute_normal && (!retval || InexData().next_compute_normal()) )
      {
         // If normal step has not been computed and step is not satisfactory, try computing normal step
         InexData().set_compute_normal(true);
         compute_normal = true;
      }
      else
      {
         // If normal step has been computed, stop anyway
         done = true;
      }
   }

   if( retval )
   {
      // Store the search directions in the IpData object
      IpData().set_delta(delta);
      if( InexData().compute_normal() )
      {
         IpData().Append_info_string("NT ");
      }
      else
      {
         IpData().Append_info_string("PD ");
      }
   }

   return retval;
}
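The loop in ComputeSearchDirection() above implements the adaptive fallback: the primal-dual system is first solved without a normal step, and only if that solve fails or the termination tester asks for the decomposition is the step recomputed with the normal step included. A standalone sketch of that control flow, with hypothetical callback names (not Ipopt code):

#include <functional>

inline bool ComputeStepWithFallback(
   const std::function<bool(bool)>& compute_step,       // solves the system; argument: use normal step?
   const std::function<bool()>& normal_step_requested,  // termination tester asked for the decomposition
   bool compute_normal)                                 // initial decision from the decomposition strategy
{
   while( true )
   {
      const bool ok = compute_step(compute_normal);
      if( !compute_normal && (!ok || normal_step_requested()) )
      {
         // First pass without the normal step was not satisfactory:
         // switch the decomposition on and solve once more.
         compute_normal = true;
         continue;
      }
      // Either the step is acceptable, or the decomposition was already used.
      return ok;
   }
}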
Example #7
void InexactLSAcceptor::InitThisLineSearch(
   bool in_watchdog
   )
{
   DBG_START_METH("InexactLSAcceptor::InitThisLineSearch",
      dbg_verbosity);

   InexData().set_full_step_accepted(false);

   // Set the values for the reference point
   if( !in_watchdog )
   {
      reference_theta_ = IpCq().curr_constraint_violation();
      reference_barr_ = IpCq().curr_barrier_obj();

      //////////////////// Update the penalty parameter

      const Number uWu = InexCq().curr_uWu();
      SmartPtr<const Vector> tangential_x = InexData().tangential_x();
      SmartPtr<const Vector> tangential_s = InexData().tangential_s();
      const Number scaled_tangential_norm = InexCq().slack_scaled_norm(*tangential_x, *tangential_s);

      SmartPtr<const Vector> delta_x = IpData().delta()->x();
      SmartPtr<const Vector> delta_s = IpData().delta()->s();

      // Get the product of the steps with the Jacobian
      SmartPtr<Vector> cplusAd_c = IpData().curr()->y_c()->MakeNew();
      cplusAd_c->AddTwoVectors(1., *IpCq().curr_jac_c_times_vec(*delta_x), 1., *IpCq().curr_c(), 0.);
      SmartPtr<Vector> cplusAd_d = delta_s->MakeNew();
      cplusAd_d->AddTwoVectors(1., *IpCq().curr_jac_d_times_vec(*delta_x), -1., *delta_s, 0.);
      cplusAd_d->Axpy(1., *IpCq().curr_d_minus_s());
      const Number norm_cplusAd = IpCq().CalcNormOfType(NORM_2, *cplusAd_c, *cplusAd_d);

      const Number gradBarrTDelta = IpCq().curr_gradBarrTDelta();

      bool compute_normal = InexData().compute_normal();

      Number Upsilon = -1.;
      Number Nu = -1.;
      if( !compute_normal )
      {
#if 0
// Compute Nu = ||A*u||^2/||A||^2
         SmartPtr<const Vector> curr_Au_c = IpCq().curr_jac_c_times_vec(*tangential_x);
         SmartPtr<Vector> curr_Au_d = delta_s->MakeNew();
         curr_Au_d->AddTwoVectors(1., *IpCq().curr_jac_d_times_vec(*tangential_x), -1., *tangential_s, 0.);
         Number Nu = IpCq().CalcNormOfType(NORM_2, *curr_Au_c, *curr_Au_d);
         Nu = pow(Nu, 2);
         Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA, "nu update: ||A*u||^2 = %23.16e\n", Nu);
         Number A_norm2 = InexCq().curr_scaled_A_norm2();
         Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA, "nu update: ||A||^2 = %23.16e\n", A_norm2);
#endif
         Nu = 0; //Nu/A_norm2;
         Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA, "nu update: Nu = ||A*u||^2/||A||^2 = %23.16e\n", Nu);

         // Compute Upsilon = ||u||^2 - Nu
         Upsilon = scaled_tangential_norm - Nu;
         Jnlst().Printf(J_MOREDETAILED, J_LINEAR_ALGEBRA,
            "nu update: Upsilon = ||u||^2 - ||A*u||^2/||A||^2 = %23.16e\n", Upsilon);
      }

      DBG_PRINT((1, "gradBarrTDelta = %e norm_cplusAd = %e reference_theta_ = %e\n", gradBarrTDelta, norm_cplusAd, reference_theta_));

      // update the upper penalty parameter
      Number nu_mid = nu_;
      Number norm_delta_xs = Max(delta_x->Amax(), delta_s->Amax());
      last_nu_ = nu_;
      if( flexible_penalty_function_ )
      {
         last_nu_low_ = nu_low_;
      }
      in_tt2_ = false;
      if( norm_delta_xs == 0. )
      {
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "  Zero step, skipping line search\n");
         in_tt2_ = true;
      }
      // TODO: We need to find a proper cut-off value
      else if( reference_theta_ > nu_update_inf_skip_tol_ )
      {
         // Different numerator for different algorithms
         Number numerator;
         Number denominator;
         if( !compute_normal )
         {
            numerator = (gradBarrTDelta + Max(0.5 * uWu, tcc_theta_ * Upsilon));
            denominator = (1 - rho_) * (reference_theta_ - norm_cplusAd);
         }
         else
         {
            DBG_PRINT((1, "uWu=%e scaled_tangential_norm=%e\n", uWu , scaled_tangential_norm ));
            numerator = (gradBarrTDelta + Max(0.5 * uWu, tcc_theta_ * pow(scaled_tangential_norm, 2)));
            denominator = (1 - rho_) * (reference_theta_ - norm_cplusAd);
         }
         const Number nu_trial = numerator / denominator;
         // Different numerator for different algorithms
         if( !compute_normal )
         {
            Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
               "In penalty parameter update formula:\n  gradBarrTDelta = %e 0.5*dWd = %e tcc_theta_*Upsilon = %e numerator = %e\n  reference_theta_ = %e norm_cplusAd + %e denominator = %e nu_trial = %e\n",
               gradBarrTDelta, 0.5 * uWu, tcc_theta_ * Upsilon, numerator, reference_theta_, norm_cplusAd, denominator,
               nu_trial);
         }
         else
         {
            Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH,
               "In penalty parameter update formula:\n  gradBarrTDelta = %e 0.5*uWu = %e tcc_theta_*pow(scaled_tangential_norm,2) = %e numerator = %e\n  reference_theta_ = %e norm_cplusAd + %e denominator = %e nu_trial = %e\n",
               gradBarrTDelta, 0.5 * uWu, tcc_theta_ * pow(scaled_tangential_norm, 2), numerator, reference_theta_,
               norm_cplusAd, denominator, nu_trial);
         }

         if( nu_ < nu_trial )
         {
            nu_ = nu_trial + nu_inc_;
            nu_mid = nu_;
         }
         if( flexible_penalty_function_ )
         {
            nu_mid = Max(nu_low_, nu_trial);
            Jnlst().Printf(J_MOREDETAILED, J_LINE_SEARCH, "   nu_low = %8.2e\n", nu_low_);
         }
      }
      else
      {
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH,
            "Warning: Skipping nu update because current constraint violation (%e) less than nu_update_inf_skip_tol.\n",
            reference_theta_);
         IpData().Append_info_string("nS");
      }
      InexData().set_curr_nu(nu_);
      Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "  using nu = %23.16e (nu_mid = %23.16e)\n", nu_, nu_mid);

      // Compute the linear model reduction prediction
      DBG_PRINT((1, "gradBarrTDelta=%e reference_theta_=%e norm_cplusAd=%e\n", gradBarrTDelta, reference_theta_, norm_cplusAd));
      reference_pred_ = -gradBarrTDelta + nu_mid * (reference_theta_ - norm_cplusAd);

      watchdog_pred_ = 1e300;
   }
   else
   {
      reference_theta_ = watchdog_theta_;
      reference_barr_ = watchdog_barr_;
      reference_pred_ = watchdog_pred_;
   }
}
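The penalty parameter update in InitThisLineSearch() computes a trial value nu_trial = (gradBarrTDelta + max(0.5*uWu, tcc_theta_*||u||^2)) / ((1 - rho_)*(theta - ||c + A*d||)); the branch without a normal step replaces ||u||^2 by Upsilon. A standalone sketch of the with-normal-step variant, with hypothetical parameter names (not Ipopt code):

#include <algorithm>
#include <cmath>

inline double PenaltyTrialValue(
   double gradBarrTDelta,           // gradient of the barrier objective times the step
   double uWu,                      // curvature term u^T W u of the tangential step
   double tcc_theta,                // model reduction parameter
   double scaled_tangential_norm,   // slack-scaled norm of the tangential step
   double rho,                      // required fraction of infeasibility reduction
   double reference_theta,          // current constraint violation
   double norm_cplusAd)             // norm of the linearized constraints after the step
{
   const double numerator =
      gradBarrTDelta + std::max(0.5 * uWu, tcc_theta * std::pow(scaled_tangential_norm, 2));
   const double denominator = (1. - rho) * (reference_theta - norm_cplusAd);
   return numerator / denominator;
}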