CompoundVector::CompoundVector(const CompoundVectorSpace* owner_space, bool create_new)
      :
      Vector(owner_space),
      comps_(owner_space->NCompSpaces()),
      const_comps_(owner_space->NCompSpaces()),
      owner_space_(owner_space),
      vectors_valid_(false)
  {
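    // Walk through all component spaces: accumulate their dimensions for a
    // consistency check and, if requested, allocate a fresh vector for each
    // component.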
    Index dim_check = 0;
    for (Index i=0; i<NComps(); i++) {
      SmartPtr<const VectorSpace> space = owner_space_->GetCompSpace(i);
      DBG_ASSERT(IsValid(space));
      dim_check += space->Dim();

      if (create_new) {
        comps_[i] = space->MakeNew();
      }
    }

    DBG_ASSERT(dim_check == Dim());

    if (create_new) {
      vectors_valid_ = VectorsValid();
    }
  }
Example #2
  SmartPtr<const Vector> AugRestoSystemSolver::Rhs_cR(const Vector& rhs_c,
      const SmartPtr<const Vector>& sigma_tilde_n_c_inv, const Vector& rhs_n_c,
      const SmartPtr<const Vector>& sigma_tilde_p_c_inv, const Vector& rhs_p_c)
  {
    DBG_START_METH("AugRestoSystemSolver::Rhs_cR",dbg_verbosity);
    SmartPtr<Vector> retVec;
    std::vector<const TaggedObject*> deps(5);
    std::vector<Number> scalar_deps;
    deps[0] = &rhs_c;
    deps[1] = GetRawPtr(sigma_tilde_n_c_inv);
    deps[2] = &rhs_n_c;
    deps[3] = GetRawPtr(sigma_tilde_p_c_inv);
    deps[4] = &rhs_p_c;
    if (!rhs_cR_cache_.GetCachedResult(retVec, deps, scalar_deps)) {
      DBG_PRINT((1,"Not found in cache\n"));
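      // Assemble retVec = rhs_c - sigma_tilde_n_c_inv.*rhs_n_c
      //                         + sigma_tilde_p_c_inv.*rhs_p_c  (elementwise products).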
      retVec = rhs_c.MakeNew();
      retVec->Copy(rhs_c);

      SmartPtr<Vector> tmp = retVec->MakeNew();
      if (IsValid(sigma_tilde_n_c_inv)) {
        tmp->Copy(*sigma_tilde_n_c_inv);
        tmp->ElementWiseMultiply(rhs_n_c);
        retVec->Axpy(-1.0, *tmp);
      }

      if (IsValid(sigma_tilde_p_c_inv)) {
        tmp->Copy(*sigma_tilde_p_c_inv);
        tmp->ElementWiseMultiply(rhs_p_c);
        retVec->Axpy(1.0, *tmp);
      }
      rhs_cR_cache_.AddCachedResult(retVec, deps, scalar_deps);
    }
    return ConstPtr(retVec);
  }
Example #3
  /* The SchurSolve functions perform only the IFT step if S_==NULL, and the full DenseGenSchurDriver Schur complement solve otherwise. */
  bool DenseGenSchurDriver::SchurSolve(SmartPtr<IteratesVector> lhs, // new left hand side will be stored here
				       SmartPtr<const IteratesVector> rhs, // rhs r_s
				       SmartPtr<Vector> delta_u,  // should be (u_p - u_0). WATCH OUT FOR THE SIGN: the convention here is u_0 + delta_u = u_p, but Victor always used it the other way round, so be careful. On exit, delta_nu is stored here.
				       SmartPtr<IteratesVector> sol) // the vector K^(-1)*r_s, which should usually have been computed beforehand.

  {
    DBG_START_METH("DenseGenSchurDriver::SchurSolve", dbg_verbosity);
    DBG_ASSERT(IsValid(S_));
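    // Two-step Schur complement solve, with sol = K^{-1}*r_s given:
    //   (3.48a)  S * delta_nu = delta_u - B*sol
    //   (3.48b)  K * lhs      = r_s - A^T*delta_nu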
    bool retval;

    // set up rhs of equation (3.48a)
    SmartPtr<Vector> delta_rhs = delta_u->MakeNew();
    data_B()->Multiply(*sol, *delta_rhs);
    delta_rhs->Print(Jnlst(),J_VECTOR,J_USER1,"delta_rhs");
    delta_rhs->Scal(-1.0);
    delta_rhs->Axpy(1.0, *delta_u);
    delta_rhs->Print(Jnlst(),J_VECTOR,J_USER1,"rhs 3.48a");

    // solve equation (3.48a) for delta_nu
    SmartPtr<DenseVector> delta_nu = dynamic_cast<DenseVector*>(GetRawPtr(delta_rhs))->MakeNewDenseVector();
    delta_nu->Copy(*delta_rhs);
    S_->LUSolveVector(*delta_nu); // why is LUSolveVector not bool??
    delta_nu->Print(Jnlst(),J_VECTOR,J_USER1,"delta_nu");

    // solve equation (3.48b) for lhs (=delta_s)
    SmartPtr<IteratesVector> new_rhs = lhs->MakeNewIteratesVector();
    data_A()->TransMultiply(*delta_nu, *new_rhs);
    new_rhs->Axpy(-1.0, *rhs);
    new_rhs->Scal(-1.0);
    new_rhs->Print(Jnlst(),J_VECTOR,J_USER1,"new_rhs");
    retval = backsolver_->Solve(lhs, ConstPtr(new_rhs));

    return retval;
  }
Example #4
SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_d_LU_NonConst(
   const Matrix&                 Pd_LU,
   const SmartPtr<const Vector>& lu,
   const VectorSpace&            d_space
   )
{
   DBG_START_METH("NLPScalingObject::apply_vector_scaling_d_LU_NonConst", dbg_verbosity);
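   // Overall effect: scaled_d_LU = Pd_LU^T * D * Pd_LU * lu, where D is the
   // diagonal scaling of the full d space (plain copy if there is no scaling).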
   SmartPtr<Vector> scaled_d_LU = lu->MakeNew();
   if( have_d_scaling() )
   {
      SmartPtr<Vector> tmp_d = d_space.MakeNew();

      // move to full d space
      Pd_LU.MultVector(1.0, *lu, 0.0, *tmp_d);

      // scale in full d space
      tmp_d = apply_vector_scaling_d_NonConst(ConstPtr(tmp_d));

      // move back to d_L space
      Pd_LU.TransMultVector(1.0, *tmp_d, 0.0, *scaled_d_LU);
   }
   else
   {
      scaled_d_LU->Copy(*lu);
   }

   return scaled_d_LU;
}
Example #5
  SmartPtr<const Vector> AugRestoSystemSolver::Rhs_dR(const Vector& rhs_d,
      const SmartPtr<const Vector>& sigma_tilde_n_d_inv, const Vector& rhs_n_d, const Matrix& pd_L,
      const SmartPtr<const Vector>& sigma_tilde_p_d_inv, const Vector& rhs_p_d, const Matrix& neg_pd_U)
  {
    DBG_START_METH("AugRestoSystemSolver::Rhs_dR",dbg_verbosity);
    SmartPtr<Vector> retVec;
    std::vector<const TaggedObject*> deps(7);
    std::vector<Number> scalar_deps;
    deps[0] = &rhs_d;
    deps[1] = GetRawPtr(sigma_tilde_n_d_inv);
    deps[2] = &rhs_n_d;
    deps[3] = &pd_L;
    deps[4] = GetRawPtr(sigma_tilde_p_d_inv);
    deps[5] = &rhs_p_d;
    deps[6] = &neg_pd_U;
    if (!rhs_dR_cache_.GetCachedResult(retVec, deps, scalar_deps)) {
      DBG_PRINT((1,"Not found in cache\n"));
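      // Assemble retVec = rhs_d - pd_L*(sigma_tilde_n_d_inv.*rhs_n_d)
      //                         - neg_pd_U*(sigma_tilde_p_d_inv.*rhs_p_d).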
      retVec = rhs_d.MakeNew();
      retVec->Copy(rhs_d);

      if (IsValid(sigma_tilde_n_d_inv)) {
        SmartPtr<Vector> tmpn = sigma_tilde_n_d_inv->MakeNew();
        tmpn->Copy(*sigma_tilde_n_d_inv);
        tmpn->ElementWiseMultiply(rhs_n_d);
        pd_L.MultVector(-1.0, *tmpn, 1.0, *retVec);
      }

      if (IsValid(sigma_tilde_p_d_inv)) {
        SmartPtr<Vector> tmpp = sigma_tilde_p_d_inv->MakeNew();
        tmpp->Copy(*sigma_tilde_p_d_inv);
        tmpp->ElementWiseMultiply(rhs_p_d);
        neg_pd_U.MultVector(-1.0, *tmpp, 1.0, *retVec);
      }

      rhs_dR_cache_.AddCachedResult(retVec, deps, scalar_deps);
    }
    return ConstPtr(retVec);
  }
Example #6
  SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_x_LU_NonConst(
    const Matrix& Px_LU,
    const SmartPtr<const Vector>& lu,
    const VectorSpace& x_space)
  {
    SmartPtr<Vector> scaled_x_LU = lu->MakeNew();
    if (have_x_scaling()) {
      SmartPtr<Vector> tmp_x = x_space.MakeNew();

      // move to full x space
      Px_LU.MultVector(1.0, *lu, 0.0, *tmp_x);

      // scale in full x space
      tmp_x = apply_vector_scaling_x_NonConst(ConstPtr(tmp_x));

      // move back to x_L space
      Px_LU.TransMultVector(1.0, *tmp_x, 0.0, *scaled_x_LU);
    }
    else {
      scaled_x_LU->Copy(*lu);
    }

    return scaled_x_LU;
  }
Example #7
  bool
  RestoRestorationPhase::PerformRestoration()
  {
    DBG_START_METH("RestoRestorationPhase::PerformRestoration",
                   dbg_verbosity);
    Jnlst().Printf(J_DETAILED, J_MAIN,
                   "Performing second level restoration phase for current constriant violation %8.2e\n", IpCq().curr_constraint_violation());

    DBG_ASSERT(IpCq().curr_constraint_violation()>0.);

    // Get a grip on the restoration phase NLP and obtain the pointers
    // to the original NLP data
    SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*> (&IpNLP());
    DBG_ASSERT(dynamic_cast<RestoIpoptNLP*> (&IpNLP()));
    SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP());
    DBG_ASSERT(dynamic_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP()));

    // Get the current point and create a new vector for the result
    SmartPtr<const CompoundVector> Ccurr_x =
      static_cast<const CompoundVector*> (GetRawPtr(IpData().curr()->x()));
    SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
    SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*> (GetRawPtr(new_x));

    // The x values remain unchanged
    SmartPtr<Vector> x = Cnew_x->GetCompNonConst(0);
    x->Copy(*Ccurr_x->GetComp(0));

    // ToDo in free mu mode - what to do here?
    Number mu = IpData().curr_mu();

    // Compute the initial values for the n and p variables for the
    // equality constraints
    Number rho = resto_ip_nlp->Rho();
    SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
    SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
    SmartPtr<const Vector> cvec = orig_ip_nlp->c(*Ccurr_x->GetComp(0));
    SmartPtr<Vector> a = nc->MakeNew();
    SmartPtr<Vector> b = nc->MakeNew();
    a->Set(mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(mu/(2.*rho));
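    // solve_quadratic is assumed to compute, elementwise, the positive root
    // n = a + sqrt(a^2 + b) of n^2 - 2*a*n - b = 0; with p = c + n the
    // relaxed equality constraint c - p + n = 0 then holds exactly.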
    solve_quadratic(*a, *b, *nc);
    pc->Copy(*cvec);
    pc->Axpy(1., *nc);
    DBG_PRINT_VECTOR(2, "nc", *nc);
    DBG_PRINT_VECTOR(2, "pc", *pc);

    // initial values for the n and p variables for the inequality
    // constraints
    SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
    SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
    SmartPtr<Vector> dvec = pd->MakeNew();
    dvec->Copy(*orig_ip_nlp->d(*Ccurr_x->GetComp(0)));
    dvec->Axpy(-1., *IpData().curr()->s());
    a = nd->MakeNew();
    b = nd->MakeNew();
    a->Set(mu/(2.*rho));
    a->Axpy(-0.5, *dvec);
    b->Copy(*dvec);
    b->Scal(mu/(2.*rho));
    solve_quadratic(*a, *b, *nd);
    pd->Copy(*dvec);
    pd->Axpy(1., *nd);
    DBG_PRINT_VECTOR(2, "nd", *nd);
    DBG_PRINT_VECTOR(2, "pd", *pd);

    // Now set the trial point to the solution of the restoration phase
    // s and all multipliers remain unchanged
    SmartPtr<IteratesVector> new_trial = IpData().curr()->MakeNewContainer();
    new_trial->Set_x(*new_x);
    IpData().set_trial(new_trial);

    IpData().Append_info_string("R");

    return true;
  }
Example #8
  bool RestoIterateInitializer::SetInitialIterates()
  {
    DBG_START_METH("RestoIterateInitializer::SetInitialIterates",
                   dbg_verbosity);

    // Get a grip on the restoration phase NLP and obtain the pointers
    // to the original NLP data
    SmartPtr<RestoIpoptNLP> resto_ip_nlp =
      static_cast<RestoIpoptNLP*> (&IpNLP());
    SmartPtr<IpoptNLP> orig_ip_nlp =
      static_cast<IpoptNLP*> (&resto_ip_nlp->OrigIpNLP());
    SmartPtr<IpoptData> orig_ip_data =
      static_cast<IpoptData*> (&resto_ip_nlp->OrigIpData());
    SmartPtr<IpoptCalculatedQuantities> orig_ip_cq =
      static_cast<IpoptCalculatedQuantities*> (&resto_ip_nlp->OrigIpCq());

    // Set the value of the barrier parameter
    Number resto_mu;
    resto_mu = Max(orig_ip_data->curr_mu(),
                   orig_ip_cq->curr_c()->Amax(),
                   orig_ip_cq->curr_d_minus_s()->Amax());
    IpData().Set_mu(resto_mu);
    Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                   "Initial barrier parameter resto_mu = %e\n", resto_mu);

    /////////////////////////////////////////////////////////////////////
    //                   Initialize primal variables                   //
    /////////////////////////////////////////////////////////////////////

    // initialize the data structures in the restoration phase NLP
    IpData().InitializeDataStructures(IpNLP(), false, false, false,
                                      false, false);

    SmartPtr<Vector> new_x = IpData().curr()->x()->MakeNew();
    SmartPtr<CompoundVector> Cnew_x =
      static_cast<CompoundVector*> (GetRawPtr(new_x));

    // Set the trial x variables from the original NLP
    Cnew_x->GetCompNonConst(0)->Copy(*orig_ip_data->curr()->x());

    // Compute the initial values for the n and p variables for the
    // equality constraints
    Number rho = resto_ip_nlp->Rho();
    DBG_PRINT((1,"rho = %e\n", rho));
    SmartPtr<Vector> nc = Cnew_x->GetCompNonConst(1);
    SmartPtr<Vector> pc = Cnew_x->GetCompNonConst(2);
    SmartPtr<const Vector> cvec = orig_ip_cq->curr_c();
    DBG_PRINT_VECTOR(2, "cvec", *cvec);
    SmartPtr<Vector> a = nc->MakeNew();
    SmartPtr<Vector> b = nc->MakeNew();
    a->Set(resto_mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(resto_mu/(2.*rho));
    DBG_PRINT_VECTOR(2, "a", *a);
    DBG_PRINT_VECTOR(2, "b", *b);
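    // As in RestoRestorationPhase: n is assumed to become the positive root
    // a + sqrt(a^2 + b) elementwise, and p = c + n restores c - p + n = 0.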
    solve_quadratic(*a, *b, *nc);
    pc->Copy(*cvec);
    pc->Axpy(1., *nc);
    DBG_PRINT_VECTOR(2, "nc", *nc);
    DBG_PRINT_VECTOR(2, "pc", *pc);

    // initial values for the n and p variables for the inequality
    // constraints
    SmartPtr<Vector> nd = Cnew_x->GetCompNonConst(3);
    SmartPtr<Vector> pd = Cnew_x->GetCompNonConst(4);
    cvec = orig_ip_cq->curr_d_minus_s();
    a = nd->MakeNew();
    b = nd->MakeNew();
    a->Set(resto_mu/(2.*rho));
    a->Axpy(-0.5, *cvec);
    b->Copy(*cvec);
    b->Scal(resto_mu/(2.*rho));
    solve_quadratic(*a, *b, *nd);
    pd->Copy(*cvec);
    pd->Axpy(1., *nd);
    DBG_PRINT_VECTOR(2, "nd", *nd);
    DBG_PRINT_VECTOR(2, "pd", *pd);

    // Leave the slacks unchanged
    SmartPtr<const Vector> new_s = orig_ip_data->curr()->s();

    // Now set the primal trial variables
    DBG_PRINT_VECTOR(2,"new_s",*new_s);
    DBG_PRINT_VECTOR(2,"new_x",*new_x);
    SmartPtr<IteratesVector> trial = IpData().curr()->MakeNewContainer();
    trial->Set_primal(*new_x, *new_s);
    IpData().set_trial(trial);

    DBG_PRINT_VECTOR(2, "resto_c", *IpCq().trial_c());
    DBG_PRINT_VECTOR(2, "resto_d_minus_s", *IpCq().trial_d_minus_s());

    /////////////////////////////////////////////////////////////////////
    //                   Initialize bound multipliers                  //
    /////////////////////////////////////////////////////////////////////

    SmartPtr<Vector> new_z_L = IpData().curr()->z_L()->MakeNew();
    SmartPtr<CompoundVector> Cnew_z_L =
      static_cast<CompoundVector*> (GetRawPtr(new_z_L));
    DBG_ASSERT(IsValid(Cnew_z_L));
    SmartPtr<Vector> new_z_U = IpData().curr()->z_U()->MakeNew();
    SmartPtr<Vector> new_v_L = IpData().curr()->v_L()->MakeNew();
    SmartPtr<Vector> new_v_U = IpData().curr()->v_U()->MakeNew();

    // multipliers for the original bounds are
    SmartPtr<const Vector> orig_z_L = orig_ip_data->curr()->z_L();
    SmartPtr<const Vector> orig_z_U = orig_ip_data->curr()->z_U();
    SmartPtr<const Vector> orig_v_L = orig_ip_data->curr()->v_L();
    SmartPtr<const Vector> orig_v_U = orig_ip_data->curr()->v_U();

    // Set the new multipliers to the min of the penalty parameter Rho
    // and their current value
    SmartPtr<Vector> Cnew_z_L0 = Cnew_z_L->GetCompNonConst(0);
    Cnew_z_L0->Set(rho);
    Cnew_z_L0->ElementWiseMin(*orig_z_L);
    new_z_U->Set(rho);
    new_z_U->ElementWiseMin(*orig_z_U);
    new_v_L->Set(rho);
    new_v_L->ElementWiseMin(*orig_v_L);
    new_v_U->Set(rho);
    new_v_U->ElementWiseMin(*orig_v_U);

    // Set the multipliers for the p and n bounds to the "primal" multipliers
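    // i.e. z = resto_mu/n (and analogously for p), so the initial point
    // satisfies the complementarity condition z*n = resto_mu exactly.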
    SmartPtr<Vector> Cnew_z_L1 = Cnew_z_L->GetCompNonConst(1);
    Cnew_z_L1->Set(resto_mu);
    Cnew_z_L1->ElementWiseDivide(*nc);
    SmartPtr<Vector> Cnew_z_L2 = Cnew_z_L->GetCompNonConst(2);
    Cnew_z_L2->Set(resto_mu);
    Cnew_z_L2->ElementWiseDivide(*pc);
    SmartPtr<Vector> Cnew_z_L3 = Cnew_z_L->GetCompNonConst(3);
    Cnew_z_L3->Set(resto_mu);
    Cnew_z_L3->ElementWiseDivide(*nd);
    SmartPtr<Vector> Cnew_z_L4 = Cnew_z_L->GetCompNonConst(4);
    Cnew_z_L4->Set(resto_mu);
    Cnew_z_L4->ElementWiseDivide(*pd);

    // Set those initial values to be the trial values in Data
    trial = IpData().trial()->MakeNewContainer();
    trial->Set_bound_mult(*new_z_L, *new_z_U, *new_v_L, *new_v_U);
    IpData().set_trial(trial);

    /////////////////////////////////////////////////////////////////////
    //           Initialize equality constraint multipliers            //
    /////////////////////////////////////////////////////////////////////

    DefaultIterateInitializer::least_square_mults(
      Jnlst(), IpNLP(), IpData(), IpCq(),
      resto_eq_mult_calculator_, constr_mult_init_max_);

    // upgrade the trial to the current point
    IpData().AcceptTrialPoint();

    DBG_PRINT_VECTOR(2, "y_c", *IpData().curr()->y_c());
    DBG_PRINT_VECTOR(2, "y_d", *IpData().curr()->y_d());

    DBG_PRINT_VECTOR(2, "z_L", *IpData().curr()->z_L());
    DBG_PRINT_VECTOR(2, "z_U", *IpData().curr()->z_U());
    DBG_PRINT_VECTOR(2, "v_L", *IpData().curr()->v_L());
    DBG_PRINT_VECTOR(2, "v_U", *IpData().curr()->v_U());

    return true;
  }
Example #9
  void EquilibrationScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    const Matrix& Px_L, const Vector& x_L,
    const Matrix& Px_U, const Vector& x_U,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x0 = x_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x0), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in EquilibrationScaling.\n");
    }

    // We store the added absolute values of the Jacobian and
    // objective function gradient in an array of sufficient size

    SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
    SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    const Index nnz_jac_c = TripletHelper::GetNumberEntries(*jac_c);
    const Index nnz_jac_d = TripletHelper::GetNumberEntries(*jac_d);
    const Index nc = jac_c_space->NRows();
    const Index nd = jac_d_space->NRows();
    const Index nx = x_space->Dim();
    Number* avrg_values = new Number[nnz_jac_c+nnz_jac_d+nx];
    Number* val_buffer = new Number[Max(nnz_jac_c,nnz_jac_d,nx)];

    SmartPtr<PointPerturber> perturber =
      new PointPerturber(*x0, point_perturbation_radius_,
                         Px_L, x_L, Px_U, x_U);

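    // Average the absolute values of the objective gradient and Jacobian
    // entries over several randomly perturbed points, which gives a more
    // robust scaling estimate than a single evaluation.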
    const Index num_evals = 4;
    const Index max_num_eval_errors = 10;
    Index num_eval_errors = 0;
    for (Index ieval=0; ieval<num_evals; ieval++) {
      // Compute obj gradient and Jacobian at random perturbation point
      bool done = false;
      while (!done) {
        SmartPtr<Vector> xpert = perturber->MakeNewPerturbedPoint();
        done = (nlp_->Eval_grad_f(*xpert, *grad_f) &&
                nlp_->Eval_jac_c(*xpert, *jac_c) &&
                nlp_->Eval_jac_d(*xpert, *jac_d));
        if (!done) {
          Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                         "Error evaluating first derivatives as at perturbed point for equilibration-based scaling.\n");
          num_eval_errors++;
        }
        if (num_eval_errors>max_num_eval_errors) {
          delete [] val_buffer;
          delete [] avrg_values;
          THROW_EXCEPTION(FAILED_INITIALIZATION,
                          "Too many evaluation failures during equilibiration-based scaling.");
        }
      }
      // Get the numbers out of the matrices and vectors, and add it
      // to avrg_values
      TripletHelper::FillValues(nnz_jac_c, *jac_c, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nnz_jac_c; i++) {
          avrg_values[i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nnz_jac_c; i++) {
          avrg_values[i] += fabs(val_buffer[i]);
        }
      }
      TripletHelper::FillValues(nnz_jac_d, *jac_d, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nnz_jac_d; i++) {
          avrg_values[nnz_jac_c+i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nnz_jac_d; i++) {
          avrg_values[nnz_jac_c+i] += fabs(val_buffer[i]);
        }
      }
      TripletHelper::FillValuesFromVector(nx, *grad_f, val_buffer);
      if (ieval==0) {
        for (Index i=0; i<nx; i++) {
          avrg_values[nnz_jac_c+nnz_jac_d+i] = fabs(val_buffer[i]);
        }
      }
      else {
        for (Index i=0; i<nx; i++) {
          avrg_values[nnz_jac_c+nnz_jac_d+i] += fabs(val_buffer[i]);
        }
      }
    }
    delete [] val_buffer;
    for (Index i=0; i<nnz_jac_c+nnz_jac_d+nx; i++) {
      avrg_values[i] /= (Number)num_evals;
    }

    // Get the sparsity structure
    ipfint* AIRN = new ipfint[nnz_jac_c+nnz_jac_d+nx];
    ipfint* AJCN = new ipfint[nnz_jac_c+nnz_jac_d+nx];
    if (sizeof(ipfint)==sizeof(Index)) {
      TripletHelper::FillRowCol(nnz_jac_c, *jac_c, &AIRN[0], &AJCN[0]);
      TripletHelper::FillRowCol(nnz_jac_d, *jac_d, &AIRN[nnz_jac_c], &AJCN[nnz_jac_c], nc);
    }
    else {
      THROW_EXCEPTION(INTERNAL_ABORT,
                      "Need to implement missing code in EquilibriationScaling.");
    }
    // sort out the zero entries in objective function gradient
    Index nnz_grad_f = 0;
    const Index idx = nnz_jac_c+nnz_jac_d;
    for (Index i=0; i<nx; i++) {
      if (avrg_values[idx+i] != 0.) {
        AIRN[idx+nnz_grad_f] = nc+nd+1;
        AJCN[idx+nnz_grad_f] = i+1;
        avrg_values[idx+nnz_grad_f] = avrg_values[idx+i];
        nnz_grad_f++;
      }
    }

    // Now call MC19 to compute the scaling factors
    const ipfint N = Max(nc+nd+1,nx);
    float* R = new float[N];
    float* C = new float[N];
    float* W = new float[5*N];
#ifdef COINHSL_HAS_MC19
    const ipfint NZ = nnz_jac_c+nnz_jac_d+nnz_grad_f;
    //F77_FUNC(mc19ad,MC19AD)(&N, &NZ, avrg_values, AIRN, AJCN, R, C, W);
    F77_FUNC(mc19ad,MC19AD)(&N, &NZ, avrg_values, AJCN, AIRN, C, R, W);
#else

    THROW_EXCEPTION(OPTION_INVALID,
                    "Currently cannot do equilibration-based NLP scaling if MC19 is not available.");
#endif

    delete[] W;

    delete [] avrg_values;
    delete [] AIRN;
    delete [] AJCN;

    // Correct the scaling values
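    // MC19 returns logarithms of the scaling factors, so exponentiate them.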
    Number* row_scale = new Number[nc+nd+1];
    Number* col_scale = new Number[nx];
    for (Index i=0; i<nc+nd+1; i++) {
      row_scale[i] = exp((Number)R[i]);
    }
    for (Index i=0; i<nx; i++) {
      col_scale[i] = exp((Number)C[i]);
    }
    delete [] R;
    delete [] C;

    // get the scaling factors
    df = row_scale[nc+nd];
    dc = c_space->MakeNew();
    TripletHelper::PutValuesInVector(nc, &row_scale[0], *dc);
    dd = d_space->MakeNew();
    TripletHelper::PutValuesInVector(nd, &row_scale[nc], *dd);
    dx = x_space->MakeNew();
    TripletHelper::PutValuesInVector(nx, col_scale, *dx);

    delete [] row_scale;
    delete [] col_scale;
  }
Example #10
  void GradientScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> p_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    const Matrix& Px_L, const Vector& x_L,
    const Matrix& Px_U, const Vector& x_U,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x = x_space->MakeNew();
    SmartPtr<Vector> p = p_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
				GetRawPtr(p), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
    }

    //
    // Calculate grad_f scaling
    //
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    if (nlp_->Eval_grad_f(*x, *p, *grad_f)) {
      double max_grad_f = grad_f->Amax();
      df = 1.;
      if (scaling_obj_target_gradient_ == 0.) {
        if (max_grad_f > scaling_max_gradient_) {
          df = scaling_max_gradient_ / max_grad_f;
        }
      }
      else {
        if (max_grad_f == 0.) {
          Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                         "Gradient of objective function is zero at starting point.  Cannot determine scaling factor based on scaling_obj_target_gradient option.\n");
        }
        else {
          df = scaling_obj_target_gradient_ / max_grad_f;
        }
      }
      df = Max(df, scaling_min_value_);
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
      df = 1.;
    }
    //
    // No x scaling
    //
    dx = NULL;

    dc = NULL;
    if (c_space->Dim()>0) {
      //
      // Calculate c scaling
      //
      SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
      if (nlp_->Eval_jac_c(*x, *p, *jac_c)) {
        dc = c_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dc->Set(dbl_min);
        jac_c->ComputeRowAMax(*dc, false);
        Number arow_max = dc->Amax();
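        // Row scaling: dc_i = min(1, scaling_max_gradient_/amax(row_i)),
        // applied only if the largest row maximum exceeds scaling_max_gradient_.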
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dc->ElementWiseReciprocal();
            dc->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dc->MakeNew();
            dummy->Set(1.);
            dc->ElementWiseMin(*dummy);
          }
          else {
            dc = NULL;
          }
        }
        else {
          dc->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dc) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dc->MakeNew();
          tmp->Set(scaling_min_value_);
          dc->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of equality constraints at user provided starting point.\n  No scaling factors for equality constraints computed!\n");
      }
    }

    dd = NULL;
    if (d_space->Dim()>0) {
      //
      // Calculate d scaling
      //
      SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
      if (nlp_->Eval_jac_d(*x, *p, *jac_d)) {
        dd = d_space->MakeNew();
        const double dbl_min = std::numeric_limits<double>::min();
        dd->Set(dbl_min);
        jac_d->ComputeRowAMax(*dd, false);
        Number arow_max = dd->Amax();
        if (scaling_constr_target_gradient_<=0.) {
          if (arow_max > scaling_max_gradient_) {
            dd->ElementWiseReciprocal();
            dd->Scal(scaling_max_gradient_);
            SmartPtr<Vector> dummy = dd->MakeNew();
            dummy->Set(1.);
            dd->ElementWiseMin(*dummy);
          }
          else {
            dd = NULL;
          }
        }
        else {
          dd->Set(scaling_constr_target_gradient_/arow_max);
        }
        if (IsValid(dd) && scaling_min_value_ > 0.) {
          SmartPtr<Vector> tmp = dd->MakeNew();
          tmp->Set(scaling_min_value_);
          dd->ElementWiseMax(*tmp);
        }
      }
      else {
        Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                       "Error evaluating Jacobian of inequality constraints at user provided starting point.\n  No scaling factors for inequality constraints computed!\n");
      }
    }
  }
Example #11
 inline
 Vector* Vector::MakeNew() const
 {
   return owner_space_->MakeNew();
 }
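Example #11 above is the factory idiom used throughout Ipopt's linear algebra layer: every Vector keeps a pointer to its owning VectorSpace, and MakeNew() delegates allocation to that space, so callers obtain a fresh vector of the right concrete type and dimension without knowing either. A minimal self-contained sketch of the idiom (class and member names are hypothetical, not Ipopt's actual declarations):

 class VecSpace;

 class Vec {
 public:
   explicit Vec(const VecSpace* owner) : owner_space_(owner) {}
   virtual ~Vec() {}
   inline Vec* MakeNew() const; // delegate allocation to the owning space
 private:
   const VecSpace* owner_space_;
 };

 class VecSpace {
 public:
   explicit VecSpace(int dim) : dim_(dim) {}
   virtual ~VecSpace() {}
   // Factory method: a concrete space would return its concrete vector type.
   virtual Vec* MakeNew() const { return new Vec(this); }
   int Dim() const { return dim_; }
 private:
   int dim_;
 };

 inline Vec* Vec::MakeNew() const
 {
   return owner_space_->MakeNew();
 }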
Example #12
Number InexactLSAcceptor::ComputeAlphaForY(
   Number                    alpha_primal,
   Number                    alpha_dual,
   SmartPtr<IteratesVector>& delta
   )
{
   DBG_START_METH("InexactLSAcceptor::ComputeAlphaForY",
      dbg_verbosity);

   // Here, we choose as step size for y either alpha_primal, if the
   // condition from the inexact paper is satisfied for it, or we
   // compute the step size closest to alpha_primal, but greater than
   // it, that gives the same progress as the full step would give.

   Number alpha_y = alpha_primal;

   SmartPtr<Vector> gx = IpCq().curr_grad_barrier_obj_x()->MakeNewCopy();
   gx->AddTwoVectors(1., *IpCq().curr_jac_cT_times_curr_y_c(), 1., *IpCq().curr_jac_dT_times_curr_y_d(), 1.);
   SmartPtr<Vector> Jxy = gx->MakeNew();
   IpCq().curr_jac_c()->TransMultVector(1., *delta->y_c(), 0., *Jxy);
   IpCq().curr_jac_d()->TransMultVector(1., *delta->y_d(), 1., *Jxy);

   SmartPtr<const Vector> curr_scaling_slacks = InexCq().curr_scaling_slacks();
   SmartPtr<Vector> gs = curr_scaling_slacks->MakeNew();
   gs->AddTwoVectors(1., *IpCq().curr_grad_barrier_obj_s(), -1., *IpData().curr()->y_d(), 0.);
   gs->ElementWiseMultiply(*curr_scaling_slacks);

   SmartPtr<Vector> Sdy = delta->y_d()->MakeNewCopy();
   Sdy->ElementWiseMultiply(*curr_scaling_slacks);

   // using the magic formula in my notebook
   Number a = pow(Jxy->Nrm2(), 2) + pow(Sdy->Nrm2(), 2);
   Number b = 2 * (gx->Dot(*Jxy) - gs->Dot(*Sdy));
   Number c = pow(gx->Nrm2(), 2) + pow(gs->Nrm2(), 2);
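   // The quadratic val(alpha) = a*alpha^2 + b*alpha + c equals
   // ||gx + alpha*Jxy||^2 + ||gs - alpha*Sdy||^2.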

   // First we check if the primal step size is good enough:
   Number val_ap = alpha_primal * alpha_primal * a + alpha_primal * b + c;
   Number val_1 = a + b + c;

   if( val_ap <= val_1 )
   {
      Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "  Step size for y: using alpha_primal.\n");
   }
   else
   {
      Number alpha_2 = -b / a - 1.;
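      // alpha_2 mirrors alpha = 1 about the parabola's vertex -b/(2*a), so
      // val(alpha_2) == val(1): the other step size with full-step progress.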
      Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "  Step size for y candidate: %8.2e - ", alpha_2);
      if( alpha_2 > alpha_primal && alpha_2 < 1. )
      {
         alpha_y = alpha_2;
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "using that one.\n");
      }
      else
      {
         alpha_y = 1.;
         Jnlst().Printf(J_DETAILED, J_LINE_SEARCH, "using 1 instead\n");
      }
   }

   return alpha_y;
}
Example #13
  void GradientScaling::DetermineScalingParametersImpl(
    const SmartPtr<const VectorSpace> x_space,
    const SmartPtr<const VectorSpace> c_space,
    const SmartPtr<const VectorSpace> d_space,
    const SmartPtr<const MatrixSpace> jac_c_space,
    const SmartPtr<const MatrixSpace> jac_d_space,
    const SmartPtr<const SymMatrixSpace> h_space,
    Number& df,
    SmartPtr<Vector>& dx,
    SmartPtr<Vector>& dc,
    SmartPtr<Vector>& dd)
  {
    DBG_ASSERT(IsValid(nlp_));

    SmartPtr<Vector> x = x_space->MakeNew();
    if (!nlp_->GetStartingPoint(GetRawPtr(x), true,
                                NULL, false,
                                NULL, false,
                                NULL, false,
                                NULL, false)) {
      THROW_EXCEPTION(FAILED_INITIALIZATION,
                      "Error getting initial point from NLP in GradientScaling.\n");
    }

    //
    // Calculate grad_f scaling
    //
    SmartPtr<Vector> grad_f = x_space->MakeNew();
    if (nlp_->Eval_grad_f(*x, *grad_f)) {
      Number max_grad_f = grad_f->Amax();
      df = 1;
      if (max_grad_f > scaling_max_gradient_) {
        df = scaling_max_gradient_ / max_grad_f;
      }
      Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                     "Scaling parameter for objective function = %e\n", df);
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating objective gradient at user provided starting point.\n  No scaling factor for objective function computed!\n");
      df = 1;
    }
    //
    // No x scaling
    //
    dx = NULL;

    //
    // Calculate c scaling
    //
    SmartPtr<Matrix> jac_c = jac_c_space->MakeNew();
    if (nlp_->Eval_jac_c(*x, *jac_c)) {
      // ToDo: Don't use TripletHelper, have special methods on matrices instead
      Index nnz = TripletHelper::GetNumberEntries(*jac_c);
      Index* irow = new Index[nnz];
      Index* jcol = new Index[nnz];
      Number* values = new Number[nnz];
      TripletHelper::FillRowCol(nnz, *jac_c, irow, jcol);
      TripletHelper::FillValues(nnz, *jac_c, values);
      Number* c_scaling = new Number[jac_c->NRows()];
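      // c_scaling[r] will hold the largest absolute Jacobian entry of row r,
      // or zero if no entry of that row exceeds scaling_max_gradient_.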

      for (Index r=0; r<jac_c->NRows(); r++) {
        c_scaling[r] = 0;
      }

      // put the max of each row into c_scaling...
      bool need_c_scale = false;
      for (Index i=0; i<nnz; i++) {
        if (fabs(values[i]) > scaling_max_gradient_) {
          c_scaling[irow[i]-1] = Max(c_scaling[irow[i]-1], fabs(values[i]));
          need_c_scale = true;
        }
      }

      if (need_c_scale) {
        // now compute the scaling factors for each row
        for (Index r=0; r<jac_c->NRows(); r++) {
          Number scaling = 1.0;
          if (c_scaling[r] > scaling_max_gradient_) {
            scaling = scaling_max_gradient_/c_scaling[r];
          }
          c_scaling[r] = scaling;
        }

        dc = c_space->MakeNew();
        TripletHelper::PutValuesInVector(jac_c->NRows(), c_scaling, *dc);
        if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
          Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                         "Equality constraints are scaled with smallest scaling parameter is %e\n", dc->Min());
        }
      }
      else {
        Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                       "Equality constraints are not scaled.\n");
        dc = NULL;
      }

      delete [] irow;
      irow = NULL;
      delete [] jcol;
      jcol = NULL;
      delete [] values;
      values = NULL;
      delete [] c_scaling;
      c_scaling = NULL;
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating Jacobian of equality constraints at user provided starting point.\n  No scaling factors for equality constraints computed!\n");
      dc = NULL;
    }

    //
    // Calculate d scaling
    //
    SmartPtr<Matrix> jac_d = jac_d_space->MakeNew();
    if (nlp_->Eval_jac_d(*x, *jac_d)) {
      Index nnz = TripletHelper::GetNumberEntries(*jac_d);
      Index* irow = new Index[nnz];
      Index* jcol = new Index[nnz];
      Number* values = new Number[nnz];
      TripletHelper::FillRowCol(nnz, *jac_d, irow, jcol);
      TripletHelper::FillValues(nnz, *jac_d, values);
      Number* d_scaling = new Number[jac_d->NRows()];

      for (Index r=0; r<jac_d->NRows(); r++) {
        d_scaling[r] = 0;
      }

      // put the max of each row into d_scaling...
      bool need_d_scale = false;
      for (Index i=0; i<nnz; i++) {
        if (fabs(values[i]) > scaling_max_gradient_) {
          d_scaling[irow[i]-1] = Max(d_scaling[irow[i]-1], fabs(values[i]));
          need_d_scale = true;
        }
      }

      if (need_d_scale) {
        // now compute the scaling factors for each row
        for (Index r=0; r<jac_d->NRows(); r++) {
          Number scaling = 1.0;
          if (d_scaling[r] > scaling_max_gradient_) {
            scaling = scaling_max_gradient_/d_scaling[r];
          }
          d_scaling[r] = scaling;
        }

        dd = d_space->MakeNew();
        TripletHelper::PutValuesInVector(jac_d->NRows(), d_scaling, *dd);
        if (Jnlst().ProduceOutput(J_DETAILED, J_INITIALIZATION)) {
          Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                         "Inequality constraints are scaled with smallest scaling parameter is %e\n", dd->Min());
        }
      }
      else {
        dd = NULL;
        Jnlst().Printf(J_DETAILED, J_INITIALIZATION,
                       "Inequality constraints are not scaled.\n");
      }

      delete [] irow;
      irow = NULL;
      delete [] jcol;
      jcol = NULL;
      delete [] values;
      values = NULL;
      delete [] d_scaling;
      d_scaling = NULL;
    }
    else {
      Jnlst().Printf(J_WARNING, J_INITIALIZATION,
                     "Error evaluating Jacobian of inequality constraints at user provided starting point.\n  No scaling factors for inequality constraints computed!\n");
      dd = NULL;
    }
  }