void NLPDakota::imp_calc_point(
  ERequiredFunc                required
  ,const DVectorSlice          &x_full
  ,bool                        newx
  ,const ZeroOrderInfoSerial   *zero_order_info
  ,const ObjGradInfoSerial     *obj_grad_info
  ,const FirstOrderExplInfo    *first_order_expl_info
  ) const
{
  using LinAlgOpPack::Vp_StV;
  typedef FirstOrderExplInfo::val_t    G_val_t;
  typedef FirstOrderExplInfo::ivect_t  G_ivect_t;
  typedef FirstOrderExplInfo::jvect_t  G_jvect_t;

  Cout << "\nEntering NLPDakota::imp_calc_point(...) ...\n";

  //throw std::logic_error("NLPDakota::imp_calc_point(): End, dam it!");

  if( newx ) {
    f_orig_updated_  = false;
    c_orig_updated_  = false;
    h_orig_updated_  = false;
    Gf_orig_updated_ = false;
    Gc_orig_updated_ = false;
    Gh_orig_updated_ = false;
    my_vec_view(dakota_functions_) = 0.0;
  }

  const DVectorSlice x_orig = x_full(1,n_orig_);

  //
  // Determine which quantities to compute
  //

  const bool calc_f  = ( required==CALC_F || multi_calc_ ) && !f_orig_updated_;
  const bool calc_c  = ( required==CALC_C || multi_calc_ ) && !c_orig_updated_;
  const bool calc_h  = ( required==CALC_H || multi_calc_ ) && !h_orig_updated_;
  const bool calc_Gf = ( required==CALC_GF || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gf_orig_updated_;
  const bool calc_Gc = ( required==CALC_GC || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gc_orig_updated_;
  const bool calc_Gh = ( required==CALC_GH || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gh_orig_updated_;

  if( !calc_f && !calc_c && !calc_h && !calc_Gf && !calc_Gc && !calc_Gh )
    return; // Everything is already computed at this point!

  value_type  *f        = NULL;
  DVector     *c        = NULL;
  DVector     *h        = NULL;
  DVector     *Gf       = NULL;
  G_val_t     *Gc_val   = NULL;
  G_ivect_t   *Gc_ivect = NULL;
  G_jvect_t   *Gc_jvect = NULL;
  G_val_t     *Gh_val   = NULL;
  G_ivect_t   *Gh_ivect = NULL;
  G_jvect_t   *Gh_jvect = NULL;

  if(zero_order_info) {
    if(calc_f) f = zero_order_info->f;
    if(calc_c) c = zero_order_info->c;
    if(calc_h) h = zero_order_info->h;
  }
  else if(obj_grad_info) {
    if(calc_f)  f  = obj_grad_info->f;
    if(calc_c)  c  = obj_grad_info->c;
    if(calc_h)  h  = obj_grad_info->h;
    if(calc_Gf) Gf = obj_grad_info->Gf;
  }
  else if(first_order_expl_info) {
    if(calc_f)  f  = first_order_expl_info->f;
    if(calc_c)  c  = first_order_expl_info->c;
    if(calc_h)  h  = first_order_expl_info->h;
    if(calc_Gf) Gf = first_order_expl_info->Gf;
    if(calc_Gc) {
      Gc_val   = first_order_expl_info->Gc_val;
      Gc_ivect = first_order_expl_info->Gc_ivect;
      Gc_jvect = first_order_expl_info->Gc_jvect;
    }
    if(calc_Gh) {
      Gh_val   = first_order_expl_info->Gh_val;
      Gh_ivect = first_order_expl_info->Gh_ivect;
      Gh_jvect = first_order_expl_info->Gh_jvect;
    }
  }
  else {
    THROW_EXCEPTION(
      !(zero_order_info || obj_grad_info || first_order_expl_info), std::logic_error
      ,"NLPDakota::imp_calc_point(): Error!" );
  }

  //
  // Set the Dakota active-set vector.
  //
  // Dakota sorts the response functions as general inequalities first
  // and general equalities second.
  //

  std::fill_n( &dakota_asv_[0], dakota_asv_.length(), 0 );

  const int num_obj_funcs = multi_obj_weights_.length();

  if( calc_f || calc_Gf ) {
    std::fill_n(
      &dakota_asv_[0], num_obj_funcs
      ,(calc_f ? 1 : 0) + (calc_Gf ? 2 : 0)
      );
  }
  if( ( calc_c || calc_Gc ) && num_nonlin_eq_ ) {
    std::fill_n(
      &dakota_asv_[num_obj_funcs + num_nonlin_ineq_], num_nonlin_eq_
      ,(calc_c ? 1 : 0) + (calc_Gc ? 2 : 0)
      );
  }
  if( ( calc_h || calc_Gh ) && num_nonlin_ineq_ ) {
    std::fill_n(
      &dakota_asv_[num_obj_funcs], num_nonlin_ineq_
      ,(calc_h ? 1 : 0) + (calc_Gh ? 2 : 0)
      );
  }

  //
  // Compute the nonlinear functions
  //

  my_vec_view(dakota_x_) = x_orig;
  model_->continuous_variables(dakota_x_);
  model_->compute_response(dakota_asv_);
  const Dakota::Response&    local_response           = model_->current_response();
  const Dakota::RealVector&  local_function_values    = local_response.function_values();
  const Dakota::RealMatrix&  local_function_gradients = local_response.function_gradients();

  // Look for a failed response
  const bool eval_failed = (local_function_values[0] == dakota_failed_value_);

  //
  // Map to the NLP objects
  //

  if(!eval_failed) {

    //
    // The evaluation succeeded so copy the values
    //

    // f, Gf
    if(calc_f)  *f  = 0.0;
    if(calc_Gf) *Gf = 0.0;
    for( int i = 0; i < num_obj_funcs; ++i ) {
      if(calc_f) {
        *f += multi_obj_weights_[i] * local_function_values[i];
        dakota_functions_[i] = local_function_values[i]; // Save in dakota_functions
      }
      if(calc_Gf) {
        Vp_StV(
          &(*Gf)(1,n_orig_), multi_obj_weights_[i]
          ,DVectorSlice(&const_cast<Dakota::RealMatrix&>(local_function_gradients)[i][0],n_orig_)
          );
      }
    }
    if(calc_Gf && Gf->dim() > n_orig_ )
      (*Gf)(n_orig_+1,Gf->dim()) = 0.0; // Zero for slack variables

    // c = [ A_lin_eq' * x - lin_eq_targ                                                                            ]
    //     [ response(num_obj_funcs+num_nonlin_ineq+1:num_obj_funcs+num_nonlin_ineq+num_nonlin_eq) - nonlin_eq_targ ]
    if( calc_c ) {
      if( num_lin_eq_ ) {
        DVectorSlice c_lin = (*c)(1,num_lin_eq_);
        V_mV( &c_lin, my_vec_view(dakota_rsqp_opt_->lin_eq_targ()) );
        const Dakota::RealMatrix &lin_eq_jac = dakota_rsqp_opt_->lin_eq_jac();
        for( int j = 0; j < num_lin_eq_; ++j ) {
          for( int i = 0; i < n_orig_; ++i ) {
            c_lin[j] += lin_eq_jac[j][i] * x_orig[i];
          }
        }
      }
      if( num_nonlin_eq_ ) {
        const value_type *nonlin_eq_ptr = &const_cast<Dakota::RealVector&>(local_function_values)[num_obj_funcs+num_nonlin_ineq_];
        V_VmV(
          &(*c)(num_lin_eq_+1,num_lin_eq_+num_nonlin_eq_)
          ,DVectorSlice(const_cast<value_type*>(nonlin_eq_ptr),num_nonlin_eq_)
          ,my_vec_view(dakota_rsqp_opt_->nonlin_eq_targ())
          );
        // Save in dakota_functions
        std::copy( nonlin_eq_ptr, nonlin_eq_ptr + num_nonlin_eq_, &dakota_functions_[num_obj_funcs+num_nonlin_ineq_] );
      }
    }

    // h = [ A_lin_ineq' * x                                         ]
    //     [ response(num_obj_funcs+1:num_obj_funcs+num_nonlin_ineq) ]
    if( calc_h ) {
      //
      if( num_lin_ineq_ ) {
        DVectorSlice h_lin = (*h)(1,num_lin_ineq_);
        const Dakota::RealMatrix &lin_ineq_jac = dakota_rsqp_opt_->lin_ineq_jac();
        for( int j = 0; j < num_lin_ineq_; ++j ) {
          for( int i = 0; i < n_orig_; ++i ) {
            h_lin[j] += lin_ineq_jac[j][i] * x_orig[i];
          }
        }
      }
      //
      if( num_nonlin_ineq_ ) {
        const value_type *nonlin_ineq_ptr = &const_cast<Dakota::RealVector&>(local_function_values)[num_obj_funcs];
        (*h)(num_lin_ineq_+1,num_lin_ineq_+num_nonlin_ineq_)
          = DVectorSlice(const_cast<value_type*>(nonlin_ineq_ptr),num_nonlin_ineq_);
        // Save in dakota_functions
        std::copy( nonlin_ineq_ptr, nonlin_ineq_ptr + num_nonlin_ineq_, &dakota_functions_[num_obj_funcs] );
      }
    }

    // Gc = [ A_lin_eq'                                     ]
    //      [ response.grad[num_nonlin_ineq+1]'             ]
    //      [ ..                                            ]
    //      [ response.grad[num_nonlin_ineq+num_nonlin_eq]' ]
    if( calc_Gc ) {
      index_type nz = 0;
      if( num_lin_eq_ ) {
        const Dakota::RealMatrix &lin_eq_jac = dakota_rsqp_opt_->lin_eq_jac();
        for( int j = 1; j <= num_lin_eq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gc_val)[nz]   = lin_eq_jac[j-1][i-1];
            (*Gc_ivect)[nz] = i;
            (*Gc_jvect)[nz] = j;
            ++nz;
          }
        }
      }
      if( num_nonlin_eq_ ) {
        for( int j = 1; j <= num_nonlin_eq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gc_val)[nz]   = local_function_gradients[num_obj_funcs+num_nonlin_ineq_+j-1][i-1];
            (*Gc_ivect)[nz] = i;
            (*Gc_jvect)[nz] = j + num_lin_eq_;
            ++nz;
          }
        }
      }
    }

    // Gh = [ A_lin_ineq'                     ]
    //      [ response.grad[1]'               ]
    //      [ ..                              ]
    //      [ response.grad[num_nonlin_ineq]' ]
    if( calc_Gh ) {
      index_type nz = 0;
      if( num_lin_ineq_ ) {
        const Dakota::RealMatrix &lin_ineq_jac = dakota_rsqp_opt_->lin_ineq_jac();
        for( int j = 1; j <= num_lin_ineq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gh_val)[nz]   = lin_ineq_jac[j-1][i-1];
            (*Gh_ivect)[nz] = i;
            (*Gh_jvect)[nz] = j;
            ++nz;
          }
        }
      }
      if( num_nonlin_ineq_ ) {
        for( int j = 1; j <= num_nonlin_ineq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gh_val)[nz]   = local_function_gradients[num_obj_funcs+j-1][i-1];
            (*Gh_ivect)[nz] = i;
            (*Gh_jvect)[nz] = j;
            ++nz;
          }
        }
      }
    }

  }
  else {

    //
    // The evaluation failed so just fill everything with an invalid
    // number that will trigger backtracking.
    //

    // f, Gf
    if(calc_f)  *f  = invalid_func_value_;
    if(calc_Gf) *Gf = invalid_func_value_;
    // c
    if(calc_c) *c = invalid_func_value_;
    // h
    if(calc_h) *h = invalid_func_value_;
    // Gc
    if(calc_Gc) {
      index_type nz = 0;
      for( int j = 1; j <= m_orig_; ++j ) {
        for( int i = 1; i <= n_orig_; ++i ) {
          (*Gc_val)[nz]   = invalid_func_value_;
          (*Gc_ivect)[nz] = i;
          (*Gc_jvect)[nz] = j;
          ++nz;
        }
      }
    }
    // Gh
    if( calc_Gh ) {
      index_type nz = 0;
      for( int j = 1; j <= mI_orig_; ++j ) {
        for( int i = 1; i <= n_orig_; ++i ) {
          (*Gh_val)[nz]   = invalid_func_value_;
          (*Gh_ivect)[nz] = i;
          (*Gh_jvect)[nz] = j;
          ++nz;
        }
      }
    }

  }

  if(calc_f)  f_orig_updated_  = true;
  if(calc_c)  c_orig_updated_  = true;
  if(calc_h)  h_orig_updated_  = true;
  if(calc_Gf) Gf_orig_updated_ = true;
  if(calc_Gc) Gc_orig_updated_ = true;
  if(calc_Gh) Gh_orig_updated_ = true;

}
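// ---------------------------------------------------------------------------
// Illustrative sketch only -- not called by the code above and all names are
// hypothetical.  Dakota's active set vector (ASV) holds one integer per
// response function with the bit encoding 1 = function value, 2 = gradient
// (so 3 = both; 4 would request the Hessian), and the responses are ordered
// [ objectives, nonlinear inequalities, nonlinear equalities ].  This is the
// layout the std::fill_n(...) calls in imp_calc_point() build.
namespace dakota_asv_sketch {

inline void fill_asv(
  int asv[], int num_obj, int num_nonlin_ineq, int num_nonlin_eq
  ,bool want_f, bool want_Gf   // objective value / gradient
  ,bool want_h, bool want_Gh   // nonlinear inequality values / Jacobian rows
  ,bool want_c, bool want_Gc   // nonlinear equality values / Jacobian rows
  )
{
  const int total = num_obj + num_nonlin_ineq + num_nonlin_eq;
  for( int k = 0; k < total; ++k )
    asv[k] = 0;
  for( int k = 0; k < num_obj; ++k )                 // objectives come first
    asv[k] = (want_f ? 1 : 0) + (want_Gf ? 2 : 0);
  for( int k = 0; k < num_nonlin_ineq; ++k )         // then general inequalities
    asv[num_obj + k] = (want_h ? 1 : 0) + (want_Gh ? 2 : 0);
  for( int k = 0; k < num_nonlin_eq; ++k )           // general equalities last
    asv[num_obj + num_nonlin_ineq + k] = (want_c ? 1 : 0) + (want_Gc ? 2 : 0);
}

} // namespace dakota_asv_sketch
// ---------------------------------------------------------------------------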
bool TangentialStepWithInequStd_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  using Teuchos::RCP;
  using Teuchos::dyn_cast;
  using ::fabs;
  using LinAlgOpPack::Vt_S;
  using LinAlgOpPack::V_VpV;
  using LinAlgOpPack::V_VmV;
  using LinAlgOpPack::Vp_StV;
  using LinAlgOpPack::Vp_V;
  using LinAlgOpPack::V_StV;
  using LinAlgOpPack::V_MtV;
  //using ConstrainedOptPack::min_abs;
  using AbstractLinAlgPack::max_near_feas_step;
  typedef VectorMutable::vec_mut_ptr_t   vec_mut_ptr_t;

  NLPAlgo &algo = rsqp_algo(_algo);
  NLPAlgoState &s = algo.rsqp_state();
  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  EJournalOutputLevel ns_olevel = algo.algo_cntr().null_space_journal_output_level();
  std::ostream &out = algo.track().journal_out();
  //const bool check_results = algo.algo_cntr().check_results();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // problem dimensions
  const size_type
    //n = algo.nlp().n(),
    m = algo.nlp().m(),
    r = s.equ_decomp().size();

  // Get the iteration quantity container objects
  IterQuantityAccess<value_type>
    &alpha_iq = s.alpha(),
    &zeta_iq  = s.zeta(),
    &eta_iq   = s.eta();
  IterQuantityAccess<VectorMutable>
    &dl_iq      = dl_iq_(s),
    &du_iq      = du_iq_(s),
    &nu_iq      = s.nu(),
    *c_iq       = m > 0 ? &s.c() : NULL,
    *lambda_iq  = m > 0 ? &s.lambda() : NULL,
    &rGf_iq     = s.rGf(),
    &w_iq       = s.w(),
    &qp_grad_iq = s.qp_grad(),
    &py_iq      = s.py(),
    &pz_iq      = s.pz(),
    &Ypy_iq     = s.Ypy(),
    &Zpz_iq     = s.Zpz();
  IterQuantityAccess<MatrixOp>
    &Z_iq   = s.Z(),
    //*Uz_iq  = (m > r) ? &s.Uz() : NULL,
    *Uy_iq  = (m > r) ? &s.Uy() : NULL;
  IterQuantityAccess<MatrixSymOp>
    &rHL_iq = s.rHL();
  IterQuantityAccess<ActSetStats>
    &act_set_stats_iq = act_set_stats_(s);

  // Accessed/modified/updated (just some)
  VectorMutable  *Ypy_k = (m ? &Ypy_iq.get_k(0) : NULL);
  const MatrixOp &Z_k   = Z_iq.get_k(0);
  VectorMutable  &pz_k  = pz_iq.set_k(0);
  VectorMutable  &Zpz_k = Zpz_iq.set_k(0);

  // Compute qp_grad which is an approximation to rGf + Z'*HL*Y*py

  // qp_grad = rGf
  VectorMutable &qp_grad_k = ( qp_grad_iq.set_k(0) = rGf_iq.get_k(0) );

  // qp_grad += zeta * w
  if( w_iq.updated_k(0) ) {
    if(zeta_iq.updated_k(0))
      Vp_StV( &qp_grad_k, zeta_iq.get_k(0), w_iq.get_k(0) );
    else
      Vp_V( &qp_grad_k, w_iq.get_k(0) );
  }

  //
  // Set the bounds for:
  //
  //   dl <= Z*pz + Y*py <= du  ->  dl - Ypy <= Z*pz <= du - Ypy

  vec_mut_ptr_t
    bl = s.space_x().create_member(),
    bu = s.space_x().create_member();

  if(m) {
    // bl = dl_k - Ypy_k
    V_VmV( bl.get(), dl_iq.get_k(0), *Ypy_k );
    // bu = du_k - Ypy_k
    V_VmV( bu.get(), du_iq.get_k(0), *Ypy_k );
  }
  else {
    *bl = dl_iq.get_k(0);
    *bu = du_iq.get_k(0);
  }

  // Print out the QP gradient and the bounds for the constraints
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nqp_grad_k = \n" << qp_grad_k;
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nbl = \n" << *bl;
    out << "\nbu = \n" << *bu;
  }

  //
  // Determine if we should perform a warm start or not.
  //
  bool do_warm_start = false;
  if( act_set_stats_iq.updated_k(-1) ) {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nDetermining if the QP should use a warm start ...\n";
    }
    // We need to see if we should perform a warm start for the next iteration
    ActSetStats &stats = act_set_stats_iq.get_k(-1);
    const size_type
      num_active = stats.num_active(),
      num_adds   = stats.num_adds(),
      num_drops  = stats.num_drops();
    const value_type
      frac_same
      = ( num_adds == ActSetStats::NOT_KNOWN || num_active == 0
          ? 0.0
          : my_max(((double)(num_active)-num_adds-num_drops) / num_active, 0.0 ) );
    do_warm_start = ( num_active > 0 && frac_same >= warm_start_frac() );
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nnum_active = " << num_active;
      if( num_active ) {
        out << "\nmax(num_active-num_adds-num_drops,0)/(num_active) = "
          << "max("<<num_active<<"-"<<num_adds<<"-"<<num_drops<<",0)/("<<num_active<<") = "
          << frac_same;
        if( do_warm_start )
          out << " >= ";
        else
          out << " < ";
        out << "warm_start_frac = " << warm_start_frac();
      }
      if( do_warm_start )
        out << "\nUse a warm start this time!\n";
      else
        out << "\nDon't use a warm start this time!\n";
    }
  }

  // Use the active set from the last iteration as an estimate for the current
  // active set if we are to use a warm start.
  //
  // ToDo: If the selection of dependent and independent variables changes
  // then you will have to adjust this or not perform a warm start at all!
  if( do_warm_start ) {
    nu_iq.set_k(0,-1);
  }
  else {
    nu_iq.set_k(0) = 0.0; // No guess of the active set
  }
  VectorMutable &nu_k = nu_iq.get_k(0);

  //
  // Setup the reduced QP subproblem
  //
  // The call to the QP is setup for the more flexible call to the QPSolverRelaxed
  // interface to deal with the three independent sources of variability: using simple
  // bounds for pz or not, general inequalities included or not, and extra equality
  // constraints included or not.
  // If this method of calling the QP solver were not used then 4 separate
  // calls to solve_qp(...) would have to be included to handle the four possible
  // QP formulations.
  //

  // The numeric arguments for the QP solver (in the nomenclature of QPSolverRelaxed)
  const value_type      qp_bnd_inf = NLP::infinite_bound();
  const Vector          &qp_g      = qp_grad_k;
  const MatrixSymOp     &qp_G      = rHL_iq.get_k(0);
  const value_type      qp_etaL    = 0.0;
  vec_mut_ptr_t         qp_dL      = Teuchos::null;
  vec_mut_ptr_t         qp_dU      = Teuchos::null;
  Teuchos::RCP<const MatrixOp>
                        qp_E       = Teuchos::null;
  BLAS_Cpp::Transp      qp_trans_E = BLAS_Cpp::no_trans;
  vec_mut_ptr_t         qp_b       = Teuchos::null;
  vec_mut_ptr_t         qp_eL      = Teuchos::null;
  vec_mut_ptr_t         qp_eU      = Teuchos::null;
  Teuchos::RCP<const MatrixOp>
                        qp_F       = Teuchos::null;
  BLAS_Cpp::Transp      qp_trans_F = BLAS_Cpp::no_trans;
  vec_mut_ptr_t         qp_f       = Teuchos::null;
  value_type            qp_eta     = 0.0;
  VectorMutable         &qp_d      = pz_k;  // pz_k will be updated directly!
  vec_mut_ptr_t         qp_nu      = Teuchos::null;
  vec_mut_ptr_t         qp_mu      = Teuchos::null;
  vec_mut_ptr_t         qp_Ed      = Teuchos::null;
  vec_mut_ptr_t         qp_lambda  = Teuchos::null;

  //
  // Determine if we can use simple bounds on pz.
  //
  // If we have a variable-reduction null-space matrix
  // (with any choice for Y) then:
  //
  //   d = Z*pz + (1-eta) * Y*py
  //
  //   [ d(var_dep)   ]   [ D ]                 [ Ypy(var_dep)   ]
  //   [ d(var_indep) ] = [ I ] * pz + (1-eta) * [ Ypy(var_indep) ]
  //
  // For a coordinate decomposition (Y = [ I ; 0 ]) then Ypy(var_indep) ==
  // 0.0 and in this case the bounds on d(var_indep) become simple bounds on
  // pz even with the relaxation.  Also, if dl(var_dep) and du(var_dep) are
  // unbounded, then we can also use simple bounds since we don't need the
  // relaxation and we can set eta = 0.  In this case we just have to subtract
  // Ypy(var_indep) from the upper and lower bounds on pz!
  //
  // Otherwise, we cannot use simple variable bounds and must implement the
  // relaxation properly.
  //

  const MatrixIdentConcat
    *Zvr = dynamic_cast<const MatrixIdentConcat*>( &Z_k );
  const Range1D
    var_dep   = Zvr ? Zvr->D_rng() : Range1D::Invalid,
    var_indep = Zvr ? Zvr->I_rng() : Range1D();

  RCP<Vector> Ypy_indep;
  const value_type
    Ypy_indep_norm_inf
    = ( m ? (Ypy_indep=Ypy_k->sub_view(var_indep))->norm_inf() : 0.0);

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS )
    out
      << "\nDetermine if we can use simple bounds on pz ...\n"
      << " m = " << m << std::endl
      << " dynamic_cast<const MatrixIdentConcat*>(&Z_k) = " << Zvr << std::endl
      << " ||Ypy_k(var_indep)||inf = " << Ypy_indep_norm_inf << std::endl;

  const bool
    bounded_var_dep
    = ( m > 0 && num_bounded( *bl->sub_view(var_dep), *bu->sub_view(var_dep), qp_bnd_inf ) );

  const bool
    use_simple_pz_bounds
    = ( m == 0 || ( Zvr != NULL && ( Ypy_indep_norm_inf == 0.0 || !bounded_var_dep ) ) );

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS )
    out
      << (use_simple_pz_bounds
          ? "\nUsing simple bounds on pz ...\n"
          : "\nUsing bounds on full Z*pz ...\n")
      << (bounded_var_dep
          ? "\nThere are finite bounds on dependent variables. Adding extra inequality constraints for D*pz ...\n"
          : "\nThere are no finite bounds on dependent variables. There will be no extra inequality constraints added on D*pz ...\n" );

  if( use_simple_pz_bounds ) {
    // Set simple bound constraints on pz
    qp_dL = bl->sub_view(var_indep);
    qp_dU = bu->sub_view(var_indep);
    qp_nu = nu_k.sub_view(var_indep);      // nu_k(var_indep) will be updated directly!
    if( m && bounded_var_dep ) {
      // Set general inequality constraints for D*pz
      qp_E  = Teuchos::rcp(&Zvr->D(),false);
      qp_b  = Ypy_k->sub_view(var_dep);
      qp_eL = bl->sub_view(var_dep);
      qp_eU = bu->sub_view(var_dep);
      qp_mu = nu_k.sub_view(var_dep);      // nu_k(var_dep) will be updated directly!
      qp_Ed = Zpz_k.sub_view(var_dep);     // Zpz_k(var_dep) will be updated directly!
    }
    else {
      // Leave these as null since there are no extra general inequality constraints
    }
  }
  else if( !use_simple_pz_bounds ) { // ToDo: Leave out parts for unbounded dependent variables!
    // There are no simple bounds! (leave qp_dL, qp_dU and qp_nu as null)
    // Set general inequality constraints for Z*pz
    qp_E  = Teuchos::rcp(&Z_k,false);
    qp_b  = Teuchos::rcp(Ypy_k,false);
    qp_eL = bl;
    qp_eU = bu;
    qp_mu = Teuchos::rcp(&nu_k,false);
    qp_Ed = Teuchos::rcp(&Zpz_k,false);    // Zpz_k will be updated directly!
  }
  else {
    TEST_FOR_EXCEPT(true);
  }

  // Set the general equality constraints (if they exist)
  Range1D equ_undecomp = s.equ_undecomp();
  if( m > r && m > 0 ) {
    // qp_f = Uy_k * py_k + c_k(equ_undecomp)
    qp_f = s.space_c().sub_space(equ_undecomp)->create_member();
    V_MtV( qp_f.get(), Uy_iq->get_k(0), BLAS_Cpp::no_trans, py_iq.get_k(0) );
    Vp_V( qp_f.get(), *c_iq->get_k(0).sub_view(equ_undecomp) );
    // Must resize for the undecomposed constraints if it has not already been
    qp_F = Teuchos::rcp(&Uy_iq->get_k(0),false);
    qp_lambda = lambda_iq->set_k(0).sub_view(equ_undecomp); // lambda_k(equ_undecomp) will be updated directly!
  }

  // Setup the rest of the arguments

  QPSolverRelaxed::EOutputLevel qp_olevel;
  switch( olevel ) {
    case PRINT_NOTHING:
      qp_olevel = QPSolverRelaxed::PRINT_NONE;
      break;
    case PRINT_BASIC_ALGORITHM_INFO:
      qp_olevel = QPSolverRelaxed::PRINT_NONE;
      break;
    case PRINT_ALGORITHM_STEPS:
      qp_olevel = QPSolverRelaxed::PRINT_BASIC_INFO;
      break;
    case PRINT_ACTIVE_SET:
      qp_olevel = QPSolverRelaxed::PRINT_ITER_SUMMARY;
      break;
    case PRINT_VECTORS:
      qp_olevel = QPSolverRelaxed::PRINT_ITER_VECTORS;
      break;
    case PRINT_ITERATION_QUANTITIES:
      qp_olevel = QPSolverRelaxed::PRINT_EVERY_THING;
      break;
    default:
      TEST_FOR_EXCEPT(true);
  }
  // ToDo: Set the print options so that vectors, matrices, etc. are only
  // printed in the null space

  //
  // Solve the QP
  //
  qp_solver().infinite_bound(qp_bnd_inf);
  const QPSolverStats::ESolutionType
    solution_type =
    qp_solver().solve_qp(
      int(olevel) == int(PRINT_NOTHING) ? NULL : &out
      ,qp_olevel
      ,( algo.algo_cntr().check_results()
         ? QPSolverRelaxed::RUN_TESTS : QPSolverRelaxed::NO_TESTS )
      ,qp_g, qp_G, qp_etaL, qp_dL.get(), qp_dU.get()
      ,qp_E.get(), qp_trans_E, qp_E.get() ? qp_b.get() : NULL
      ,qp_E.get() ? qp_eL.get() : NULL, qp_E.get() ? qp_eU.get() : NULL
      ,qp_F.get(), qp_trans_F, qp_F.get() ? qp_f.get() : NULL
      ,NULL // obj_d
      ,&qp_eta, &qp_d
      ,qp_nu.get()
      ,qp_mu.get(), qp_E.get() ? qp_Ed.get() : NULL
      ,qp_F.get() ? qp_lambda.get() : NULL
      ,NULL // qp_Fd
      );

  //
  // Check the optimality conditions for the QP
  //
  std::ostringstream omsg;
  bool throw_qp_failure = false;
  if( qp_testing() == QP_TEST
    || ( qp_testing() == QP_TEST_DEFAULT && algo.algo_cntr().check_results() ) )
  {
    if( int(olevel) >= int(PRINT_ALGORITHM_STEPS) ) {
      out << "\nChecking the optimality conditions of the reduced QP subproblem ...\n";
    }
    if(!qp_tester().check_optimality_conditions(
      solution_type,qp_solver().infinite_bound()
      ,int(olevel) == int(PRINT_NOTHING) ? NULL : &out
      ,int(olevel) >= int(PRINT_VECTORS) ? true : false
      ,int(olevel) >= int(PRINT_ITERATION_QUANTITIES) ? true : false
      ,qp_g, qp_G, qp_etaL, qp_dL.get(), qp_dU.get()
      ,qp_E.get(), qp_trans_E, qp_E.get() ? qp_b.get() : NULL
      ,qp_E.get() ? qp_eL.get() : NULL, qp_E.get() ? qp_eU.get() : NULL
      ,qp_F.get(), qp_trans_F, qp_F.get() ? qp_f.get() : NULL
      ,NULL // obj_d
      ,&qp_eta, &qp_d
      ,qp_nu.get()
      ,qp_mu.get(), qp_E.get() ? qp_Ed.get() : NULL
      ,qp_F.get() ? qp_lambda.get() : NULL
      ,NULL // qp_Fd
      ))
    {
      omsg << "\n*** Alert! at least one of the QP optimality conditions did not check out.\n";
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << omsg.str();
      }
      throw_qp_failure = true;
    }
  }

  //
  // Set the solution
  //
  if( !use_simple_pz_bounds ) {
    // Everything is already updated!
  }
  else if( use_simple_pz_bounds ) {
    // Just have to set Zpz_k(var_indep) = pz_k
    *Zpz_k.sub_view(var_indep) = pz_k;
    if( m && !bounded_var_dep ) {
      // Must compute Zpz_k(var_dep) = D*pz
      LinAlgOpPack::V_MtV( &*Zpz_k.sub_view(var_dep), Zvr->D(), BLAS_Cpp::no_trans, pz_k );
      // ToDo: Remove the computation of Zpz here unless you must
    }
  }
  else {
    TEST_FOR_EXCEPT(true);
  }

  // Set the solution statistics
  qp_solver_stats_(s).set_k(0) = qp_solver().get_qp_stats();

  // Cut back Ypy_k = (1-eta) * Ypy_k
  const value_type eps = std::numeric_limits<value_type>::epsilon();
  if( fabs(qp_eta - 0.0) > eps ) {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out
        << "\n*** Alert! the QP was infeasible (eta = "<<qp_eta<<"). Cutting back Ypy_k = (1.0 - eta)*Ypy_k ...\n";
    }
    Vt_S( Ypy_k, 1.0 - qp_eta );
  }

  // eta_k
  eta_iq.set_k(0) = qp_eta;

  //
  // Modify the solution if we have to!
  //

  switch(solution_type) {
    case QPSolverStats::OPTIMAL_SOLUTION:
      break; // we are good!
    case QPSolverStats::PRIMAL_FEASIBLE_POINT:
    {
      omsg << "\n*** Alert! the returned QP solution is PRIMAL_FEASIBLE_POINT but not optimal!\n";
      if( primal_feasible_point_error() )
        omsg << "\n*** primal_feasible_point_error == true, this is an error!\n";
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << omsg.str();
      }
      throw_qp_failure = primal_feasible_point_error();
      break;
    }
    case QPSolverStats::DUAL_FEASIBLE_POINT:
    {
      omsg << "\n*** Alert! the returned QP solution is DUAL_FEASIBLE_POINT"
           << "\n*** but not optimal so we cut back the step ...\n";
      if( dual_feasible_point_error() )
        omsg << "\n*** dual_feasible_point_error == true, this is an error!\n";
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << omsg.str();
      }
      // Cut back the step to fit in the bounds
      //
      //   dl <= u*(Ypy_k+Zpz_k) <= du
      //
      vec_mut_ptr_t
        zero  = s.space_x().create_member(0.0),
        d_tmp = s.space_x().create_member();
      V_VpV( d_tmp.get(), *Ypy_k, Zpz_k );
      const std::pair<value_type,value_type>
        u_steps = max_near_feas_step( *zero, *d_tmp, dl_iq.get_k(0), du_iq.get_k(0), 0.0 );
      const value_type
        u = my_min( u_steps.first, 1.0 ); // largest positive step size
      alpha_iq.set_k(0) = u;
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out
          << "\nFinding u s.t. dl <= u*(Ypy_k+Zpz_k) <= du\n"
          << "max step length u = " << u << std::endl
          << "alpha_k = u = " << alpha_iq.get_k(0) << std::endl;
      }
      throw_qp_failure = dual_feasible_point_error();
      break;
    }
    case QPSolverStats::SUBOPTIMAL_POINT:
    {
      omsg << "\n*** Alert!, the returned QP solution is SUBOPTIMAL_POINT!\n";
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << omsg.str();
      }
      throw_qp_failure = true;
      break;
    }
    default:
      TEST_FOR_EXCEPT(true); // should not happen!
  }

  //
  // Output the final solution!
  //
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out
      << "\n||pz_k||inf = " << s.pz().get_k(0).norm_inf()
      << "\nnu_k.nz() = " << s.nu().get_k(0).nz()
      << "\nmax(|nu_k(i)|) = " << s.nu().get_k(0).norm_inf()
      //<< "\nmin(|nu_k(i)|) = " << min_abs( s.nu().get_k(0)() )
      ;
    if( m > r )
      out << "\n||lambda_k(undecomp)||inf = " << s.lambda().get_k(0).norm_inf();
    out << "\n||Zpz_k||2 = " << s.Zpz().get_k(0).norm_2();
    if(qp_eta > 0.0)
      out << "\n||Ypy||2 = " << s.Ypy().get_k(0).norm_2();
    out << std::endl;
  }

  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\npz_k = \n" << s.pz().get_k(0);
    if(var_indep.size())
      out << "\nnu_k(var_indep) = \n" << *s.nu().get_k(0).sub_view(var_indep);
  }

  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if(var_indep.size())
      out << "\nZpz(var_indep)_k = \n" << *s.Zpz().get_k(0).sub_view(var_indep);
    out << std::endl;
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if(var_dep.size())
      out << "\nZpz(var_dep)_k = \n" << *s.Zpz().get_k(0).sub_view(var_dep);
    out << "\nZpz_k = \n" << s.Zpz().get_k(0);
    out << std::endl;
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nnu_k = \n" << s.nu().get_k(0);
    if(var_dep.size())
      out << "\nnu_k(var_dep) = \n" << *s.nu().get_k(0).sub_view(var_dep);
    if( m > r )
      out << "\nlambda_k(equ_undecomp) = \n" << *s.lambda().get_k(0).sub_view(equ_undecomp);
    if(qp_eta > 0.0)
      out << "\nYpy = \n" << s.Ypy().get_k(0);
  }

  if( qp_eta == 1.0 ) {
    omsg
      << "TangentialStepWithInequStd_Step::do_step(...) : Error, a QP relaxation parameter\n"
      << "of eta = " << qp_eta << " was calculated and therefore it must be assumed\n"
      << "that the NLP's constraints are infeasible\n"
      << "Throwing an InfeasibleConstraints exception!\n";
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << omsg.str();
    }
    throw InfeasibleConstraints(omsg.str());
  }

  if( throw_qp_failure )
    throw QPFailure( omsg.str(), qp_solver().get_qp_stats() );

  return true;
}
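// ---------------------------------------------------------------------------
// Illustrative sketches only -- these helpers are not called by the step above
// and all names are hypothetical.  They restate, on plain arrays, two pieces of
// the logic used when setting up the tangential QP:
//
//  1) With the step decomposition d = Z*pz + Y*py and full-space bounds
//     dl <= d <= du, the bounds handed to the QP are
//     (dl - Y*py) <= Z*pz <= (du - Y*py), which is what the V_VmV(...) calls
//     compute for bl and bu.
//  2) The warm-start test reuses the previous active set when the fraction
//     max((num_active - num_adds - num_drops)/num_active, 0) is at least
//     warm_start_frac (the NOT_KNOWN sentinel handled above is omitted here).
namespace tangential_qp_sketch {

inline void shift_bounds_for_qp(
  const double dl[], const double du[], const double Ypy[], int n
  ,double bl[], double bu[] // output: bounds on Z*pz
  )
{
  for( int i = 0; i < n; ++i ) {
    bl[i] = dl[i] - Ypy[i]; // entries at -infinity stay effectively -infinity
    bu[i] = du[i] - Ypy[i]; // entries at +infinity stay effectively +infinity
  }
}

inline bool use_warm_start(
  int num_active, int num_adds, int num_drops, double warm_start_frac
  )
{
  if( num_active <= 0 )
    return false;
  double frac_same = ( (double)num_active - num_adds - num_drops ) / num_active;
  if( frac_same < 0.0 )
    frac_same = 0.0;
  return frac_same >= warm_start_frac;
}

} // namespace tangential_qp_sketch
// ---------------------------------------------------------------------------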
bool TangentialStepIP_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  using BLAS_Cpp::no_trans;
  using Teuchos::dyn_cast;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::Vt_S;
  using LinAlgOpPack::Vp_StV;
  using LinAlgOpPack::V_StV;
  using LinAlgOpPack::V_MtV;
  using LinAlgOpPack::V_InvMtV;
  using LinAlgOpPack::M_StM;
  using LinAlgOpPack::Mp_StM;
  using LinAlgOpPack::assign;

  NLPAlgo &algo = rsqp_algo(_algo);
  IpState &s = dyn_cast<IpState>(_algo.state());
  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // Compute qp_grad which is an approximation to
  // rGf + Z'*(mu*(invXu*e-invXl*e)) + cross term.
  // To minimize round-off error, calculate Z'*(Gf + mu*(invXu*e-invXl*e)) directly:
  //
  //   qp_grad_k = Z'*(Gf + mu*(invXu*e-invXl*e))
  const MatrixSymDiagStd &invXu = s.invXu().get_k(0);
  const MatrixSymDiagStd &invXl = s.invXl().get_k(0);
  const value_type &mu = s.barrier_parameter().get_k(0);
  const MatrixOp &Z_k = s.Z().get_k(0);

  Teuchos::RCP<VectorMutable> rhs = s.Gf().get_k(0).clone();
  Vp_StV( rhs.get(), mu, invXu.diag() );
  Vp_StV( rhs.get(), -1.0*mu, invXl.diag() );

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n||Gf_k + mu_k*(invXu_k-invXl_k)||inf = " << rhs->norm_inf() << std::endl;
  }
  if( (int)olevel >= (int)PRINT_VECTORS) {
    out << "\nGf_k + mu_k*(invXu_k-invXl_k) =\n" << *rhs;
  }

  VectorMutable &qp_grad_k = s.qp_grad().set_k(0);
  V_MtV(&qp_grad_k, Z_k, BLAS_Cpp::trans, *rhs);

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n||Z_k'*(Gf_k + mu_k*(invXu_k-invXl_k))||inf = " << qp_grad_k.norm_inf() << std::endl;
  }
  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "\nZ_k'*(Gf_k + mu_k*(invXu_k-invXl_k)) =\n" << qp_grad_k;
  }

  // Add the cross-term contribution zeta*w_sigma
  // (ToDo: add code to calculate the damping parameter zeta; for now zeta = 1.0)
  value_type &zeta = s.zeta().set_k(0);
  const Vector &w_sigma = s.w_sigma().get_k(0);
  zeta = 1.0;
  Vp_StV(&qp_grad_k, zeta, w_sigma);

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n||qp_grad_k||inf = " << qp_grad_k.norm_inf() << std::endl;
  }
  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "\nqp_grad_k =\n" << qp_grad_k;
  }

  // Build the "Hessian" term B = rHL + rHB
  // (should this be MatrixSymOpNonsing?)
  const MatrixSymOp &rHL_k = s.rHL().get_k(0);
  const MatrixSymOp &rHB_k = s.rHB().get_k(0);
  MatrixSymOpNonsing &B_k = dyn_cast<MatrixSymOpNonsing>(s.B().set_k(0));
  if (B_k.cols() != Z_k.cols()) {
    // Initialize space in B_k
    dyn_cast<MatrixSymInitDiag>(B_k).init_identity(Z_k.space_rows(), 0.0);
  }

  //M_StM(&B_k, 1.0, rHL_k, no_trans);
  assign(&B_k, rHL_k, BLAS_Cpp::no_trans);
  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "\nB_k = rHL_k =\n" << B_k;
  }
  Mp_StM(&B_k, 1.0, rHB_k, BLAS_Cpp::no_trans);
  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "\nB_k = rHL_k + rHB_k =\n" << B_k;
  }

  // Solve the system pz = - inv(B) * qp_grad
  VectorMutable &pz_k = s.pz().set_k(0);
  V_InvMtV( &pz_k, B_k, no_trans, qp_grad_k );
  Vt_S( &pz_k, -1.0 );

  // Zpz = Z * pz
  V_MtV( &s.Zpz().set_k(0), s.Z().get_k(0), no_trans, pz_k );

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) {
    out << "\n||pz||inf = " << s.pz().get_k(0).norm_inf()
        << "\nsum(Zpz) = " << AbstractLinAlgPack::sum(s.Zpz().get_k(0)) << std::endl;
  }
  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "\npz_k = \n" << s.pz().get_k(0);
    out << "\nnu_k = \n" << s.nu().get_k(0);
    out << "\nZpz_k = \n" << s.Zpz().get_k(0);
    out << std::endl;
  }

  if(algo.algo_cntr().check_results()) {
    assert_print_nan_inf(s.pz().get_k(0), "pz_k", true, &out);
    assert_print_nan_inf(s.Zpz().get_k(0), "Zpz_k", true, &out);
  }

  return true;
}
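// ---------------------------------------------------------------------------
// Illustrative sketch only -- not called by the step above and all names are
// hypothetical.  It spells out, elementwise and with a dense Z purely for
// illustration, the reduced barrier gradient the step assembles with
// vector/matrix operations:
//
//   rhs     = Gf + mu*( 1/(xu - x) - 1/(x - xl) )
//   qp_grad = Z'*rhs + zeta*w_sigma    (above, the damping parameter zeta = 1.0)
namespace ip_tangential_sketch {

inline void barrier_gradient_rhs(
  const double Gf[], const double x[], const double xl[], const double xu[]
  ,double mu, int n, double rhs[]
  )
{
  for( int i = 0; i < n; ++i )
    rhs[i] = Gf[i] + mu * ( 1.0/(xu[i] - x[i]) - 1.0/(x[i] - xl[i]) );
}

// qp_grad = Z'*rhs + zeta*w_sigma, with Z stored densely, row-major, n x nz
inline void reduced_qp_gradient(
  const double Z[], const double rhs[], const double w_sigma[]
  ,double zeta, int n, int nz, double qp_grad[]
  )
{
  for( int j = 0; j < nz; ++j ) {
    double sum = 0.0;
    for( int i = 0; i < n; ++i )
      sum += Z[i*nz + j] * rhs[i];
    qp_grad[j] = sum + zeta * w_sigma[j];
  }
}

} // namespace ip_tangential_sketch
// ---------------------------------------------------------------------------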
bool PostEvalNewPointBarrier_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  using Teuchos::dyn_cast;
  using IterationPack::print_algorithm_step;
  using AbstractLinAlgPack::inv_of_difference;
  using AbstractLinAlgPack::correct_upper_bound_multipliers;
  using AbstractLinAlgPack::correct_lower_bound_multipliers;
  using LinAlgOpPack::Vp_StV;

  NLPAlgo &algo = dyn_cast<NLPAlgo>(_algo);
  IpState &s = dyn_cast<IpState>(_algo.state());
  NLP &nlp = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  if(!nlp.is_initialized())
    nlp.initialize(algo.algo_cntr().check_results());

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( _algo, step_poss, type, assoc_step_poss, out );
  }

  IterQuantityAccess<VectorMutable>    &x_iq  = s.x();
  IterQuantityAccess<MatrixSymDiagStd> &Vl_iq = s.Vl();
  IterQuantityAccess<MatrixSymDiagStd> &Vu_iq = s.Vu();

  ///***********************************************************
  // Calculate invXl = diag(1/(x-xl))
  //  and invXu = diag(1/(xu-x)) matrices
  ///***********************************************************

  // get references to x, invXl, and invXu
  VectorMutable&    x     = x_iq.get_k(0);
  MatrixSymDiagStd& invXu = s.invXu().set_k(0);
  MatrixSymDiagStd& invXl = s.invXl().set_k(0);

  //std::cout << "xu=\n";
  //nlp.xu().output(std::cout);

  inv_of_difference(1.0, nlp.xu(), x, &invXu.diag());
  inv_of_difference(1.0, x, nlp.xl(), &invXl.diag());

  //std::cout << "invXu=\n";
  //invXu.output(std::cout);
  //std::cout << "\ninvXl=\n";
  //invXl.output(std::cout);

  // Check for divide-by-zero
  using AbstractLinAlgPack::assert_print_nan_inf;
  assert_print_nan_inf(invXu.diag(), "invXu", true, &out);
  assert_print_nan_inf(invXl.diag(), "invXl", true, &out);
  // These should never go negative either - could be a useful check

  // Initialize Vu and Vl if necessary
  if ( /*!Vu_iq.updated_k(0)*/ Vu_iq.last_updated() == IterQuantity::NONE_UPDATED ) {
    VectorMutable& vu = Vu_iq.set_k(0).diag();
    vu = 0;
    Vp_StV(&vu, s.barrier_parameter().get_k(-1), invXu.diag());
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nInitialize Vu with barrier_parameter * invXu ...\n";
    }
  }

  if ( /*!Vl_iq.updated_k(0)*/ Vl_iq.last_updated() == IterQuantity::NONE_UPDATED ) {
    VectorMutable& vl = Vl_iq.set_k(0).diag();
    vl = 0;
    Vp_StV(&vl, s.barrier_parameter().get_k(-1), invXl.diag());
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nInitialize Vl with barrier_parameter * invXl ...\n";
    }
  }

  if (s.num_basis().updated_k(0)) {
    // Basis changed, reorder Vl and Vu
    if (Vu_iq.updated_k(-1)) {
      Vu_iq.set_k(0,-1);
    }
    if (Vl_iq.updated_k(-1)) {
      Vl_iq.set_k(0,-1);
    }

    VectorMutable& vu = Vu_iq.set_k(0).diag();
    VectorMutable& vl = Vl_iq.set_k(0).diag();

    s.P_var_last().permute( BLAS_Cpp::trans, &vu );    // Permute back to original order
    s.P_var_last().permute( BLAS_Cpp::trans, &vl );    // Permute back to original order

    if( (int)olevel >= (int)PRINT_VECTORS ) {
      out << "\nx resorted vl and vu to the original order\n" << x;
    }

    s.P_var_current().permute( BLAS_Cpp::no_trans, &vu ); // Permute to new (current) order
    s.P_var_current().permute( BLAS_Cpp::no_trans, &vl ); // Permute to new (current) order

    if( (int)olevel >= (int)PRINT_VECTORS ) {
      out << "\nx resorted to new basis \n" << x;
    }
  }

  correct_upper_bound_multipliers(nlp.xu(), +NLP::infinite_bound(), &Vu_iq.get_k(0).diag());
  correct_lower_bound_multipliers(nlp.xl(), -NLP::infinite_bound(), &Vl_iq.get_k(0).diag());

  if( (int)olevel >= (int)PRINT_VECTORS ) {
    out << "x=\n"  << s.x().get_k(0);
    out << "xl=\n" << nlp.xl();
    out << "vl=\n" << s.Vl().get_k(0).diag();
    out << "xu=\n" << nlp.xu();
    out << "vu=\n" << s.Vu().get_k(0).diag();
  }

  // Update general algorithm bound multipliers
  VectorMutable& v = s.nu().set_k(0);
  v = Vu_iq.get_k(0).diag();
  Vp_StV(&v,-1.0,Vl_iq.get_k(0).diag());

  // Print vector information
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "invXu_k.diag()=\n" << invXu.diag();
    out << "invXl_k.diag()=\n" << invXl.diag();
    out << "Vu_k.diag()=\n"    << Vu_iq.get_k(0).diag();
    out << "Vl_k.diag()=\n"    << Vl_iq.get_k(0).diag();
    out << "nu_k=\n"           << s.nu().get_k(0);
  }

  return true;
}
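// ---------------------------------------------------------------------------
// Illustrative sketch only -- not called by the step above and all names are
// hypothetical.  Elementwise, the quantities formed in do_step() are
// invXl = 1/(x - xl), invXu = 1/(xu - x), the initial bound multipliers
// Vl = mu*invXl and Vu = mu*invXu (only when they have not been updated yet),
// and the combined multiplier nu = Vu - Vl.  Multipliers at infinite bounds
// are simply zeroed in this sketch, which is what the
// correct_lower/upper_bound_multipliers calls above are assumed to do.
namespace barrier_post_eval_sketch {

inline void init_barrier_multipliers(
  const double x[], const double xl[], const double xu[]
  ,double mu, double inf_bnd, int n
  ,double vl[], double vu[], double nu[]
  )
{
  for( int i = 0; i < n; ++i ) {
    vl[i] = ( xl[i] <= -inf_bnd ? 0.0 : mu / (x[i] - xl[i]) );
    vu[i] = ( xu[i] >= +inf_bnd ? 0.0 : mu / (xu[i] - x[i]) );
    nu[i] = vu[i] - vl[i];
  }
}

} // namespace barrier_post_eval_sketch
// ---------------------------------------------------------------------------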