ObjectiveFromBoundConstraint( const BoundConstraint<Real> &bc )
  : lo_( bc.getLowerVectorRCP() ), up_( bc.getUpperVectorRCP() ) {
  a_ = lo_->clone();
  b_ = up_->clone();
}
void updateIterate(Vector<Real> &xnew, const Vector<Real> &x, const Vector<Real> &s,
                   Real alpha, BoundConstraint<Real> &con ) {
  xnew.set(x);
  xnew.axpy(alpha,s);
  if ( con.isActivated() ) {
    con.project(xnew);
  }
}
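// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): updateIterate above forms the trial
// point xnew = P_[a,b](x + alpha*s). For box bounds the projection is a
// componentwise clamp; the plain-vector version below is a hypothetical
// stand-in for Vector<Real>/BoundConstraint<Real> to show the arithmetic.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>

std::vector<double> updateIterateSketch(const std::vector<double> &x,
                                        const std::vector<double> &s,
                                        double alpha,
                                        const std::vector<double> &lo,
                                        const std::vector<double> &up) {
  std::vector<double> xnew(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    xnew[i] = x[i] + alpha*s[i];                          // xnew = x + alpha*s
    xnew[i] = std::min(std::max(xnew[i], lo[i]), up[i]);  // project onto [lo,up]
  }
  return xnew;
}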
ObjectiveFromBoundConstraint( const BoundConstraint<Real> &bc,
                              Teuchos::ParameterList &parlist )
  : lo_( bc.getLowerVectorRCP() ), up_( bc.getUpperVectorRCP() ) {
  a_ = lo_->clone();
  b_ = up_->clone();
  std::string bfstring = parlist.sublist("Barrier Function").get("Type","Logarithmic");
  btype_ = StringToEBarrierType(bfstring);
}
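// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): the "Logarithmic" barrier type
// selected above commonly corresponds to a penalty of the form
//   phi(x) = -sum_i [ log(x_i - a_i) + log(b_i - x_i) ],
// which grows without bound as any component of x approaches a bound. The
// plain-vector evaluation below shows the general shape as an assumption,
// not ROL's exact implementation.
// ---------------------------------------------------------------------------
#include <cmath>
#include <vector>

double logBarrierSketch(const std::vector<double> &x,
                        const std::vector<double> &a,
                        const std::vector<double> &b) {
  double phi = 0.0;
  for (std::size_t i = 0; i < x.size(); ++i) {
    phi -= std::log(x[i] - a[i]) + std::log(b[i] - x[i]);  // finite only if a < x < b
  }
  return phi;
}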
bool BoundConjunctiveConstraint::Subsume(BoundConstraint* that) const {
  BoundConstraint* left = Left();
  BoundConstraint* right = Right();
  if (that->IsBinaryConstraint()) {
    BoundBinaryConstraint* thatBinaryConstraint = static_cast<BoundBinaryConstraint*>(that);
    BoundConstraint* thatLeft = thatBinaryConstraint->Left();
    BoundConstraint* thatRight = thatBinaryConstraint->Right();
    bool leftSubsumeThatLeft = left->Subsume(thatLeft);
    bool rightSubsumeThatLeft = right->Subsume(thatLeft);
    bool leftSubsumeThatRight = left->Subsume(thatRight);
    bool rightSubsumeThatRight = right->Subsume(thatRight);
    bool leftOrRightSubsumeThatLeft = leftSubsumeThatLeft || rightSubsumeThatLeft;
    bool leftOrRightSubsumeThatRight = leftSubsumeThatRight || rightSubsumeThatRight;
    if (that->IsConjunctiveConstraint()) {
      return leftOrRightSubsumeThatLeft && leftOrRightSubsumeThatRight;
    }
    else if (that->IsDisjunctiveConstraint()) {
      return leftOrRightSubsumeThatLeft || leftOrRightSubsumeThatRight;
    }
    else { // assert(false): a binary constraint is either conjunctive or disjunctive
      return false;
    }
  }
  else {
    bool leftSubsumeThat = left->Subsume(that);
    bool rightSubsumeThat = right->Subsume(that);
    return leftSubsumeThat || rightSubsumeThat;
  }
}
void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
              BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Compute projected secant step
  // ---> Apply inactive-inactive block of inverse secant to gradient
  gp_->set(*(step_state->gradientVec));
  bnd.pruneActive(*gp_,*(step_state->gradientVec),x,algo_state.gnorm);
  secant_->applyH(s,*gp_);
  bnd.pruneActive(s,*(step_state->gradientVec),x,algo_state.gnorm);
  // ---> Add in active gradient components
  gp_->set(*(step_state->gradientVec));
  bnd.pruneInactive(*gp_,*(step_state->gradientVec),x,algo_state.gnorm);
  s.plus(gp_->dual());
  s.scale(-1.0);
}
/** \brief Compute the gradient-based criticality measure.

    The criticality measure is \f$\|x_k - P_{[a,b]}(x_k-\nabla f(x_k))\|_{\mathcal{X}}\f$.
    Here, \f$P_{[a,b]}\f$ denotes the projection onto the bound constraints.

    @param[in]  x    is the current iterate
    @param[in]  obj  is the objective function
    @param[in]  con  are the bound constraints
    @param[in]  tol  is a tolerance for inexact evaluations of the objective function
*/
Real computeCriticalityMeasure(Vector<Real> &x, Objective<Real> &obj,
                               BoundConstraint<Real> &con, Real tol) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  obj.gradient(*(step_state->gradientVec),x,tol);
  xtmp_->set(x);
  xtmp_->axpy(-1.0,(step_state->gradientVec)->dual());
  con.project(*xtmp_);
  xtmp_->axpy(-1.0,x);
  return xtmp_->norm();
}
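// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): the criticality measure
//   ||x - P_[a,b](x - grad f(x))||
// vanishes exactly at stationary points of the bound-constrained problem, so
// it serves as the gradient-norm surrogate in the stopping test. Plain-vector
// version, reusing the componentwise clamp as the projection:
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <vector>

double criticalityMeasureSketch(const std::vector<double> &x,
                                const std::vector<double> &grad,
                                const std::vector<double> &lo,
                                const std::vector<double> &up) {
  double nrm2 = 0.0;
  for (std::size_t i = 0; i < x.size(); ++i) {
    double p = std::min(std::max(x[i] - grad[i], lo[i]), up[i]);  // P(x - g)
    nrm2 += (x[i] - p)*(x[i] - p);
  }
  return std::sqrt(nrm2);
}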
void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
              BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  // Compute projected Newton step
  // ---> Apply inactive-inactive block of inverse hessian to gradient
  gp_->set(*(step_state->gradientVec));
  bnd.pruneActive(*gp_,*(step_state->gradientVec),x,algo_state.gnorm);
  obj.invHessVec(s,*gp_,x,tol);
  bnd.pruneActive(s,*(step_state->gradientVec),x,algo_state.gnorm);
  // ---> Add in active gradient components
  gp_->set(*(step_state->gradientVec));
  bnd.pruneInactive(*gp_,*(step_state->gradientVec),x,algo_state.gnorm);
  s.plus(gp_->dual());
  s.scale(-1.0);
}
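// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): the projected Newton step above
// splits components by the eps-active set,
//   s_I = -(H^{-1} g)_I  on the inactive set,   s_A = -g_A  on the active set.
// With a diagonal positive Hessian h this reduces to the loop below; the
// active-set test mirrors pruneActive (at a bound with the gradient pushing
// outward).
// ---------------------------------------------------------------------------
#include <vector>

std::vector<double> projectedNewtonSketch(const std::vector<double> &x,
                                          const std::vector<double> &g,
                                          const std::vector<double> &h,  // diag(Hessian), h[i] > 0
                                          const std::vector<double> &lo,
                                          const std::vector<double> &up,
                                          double eps) {
  std::vector<double> s(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    bool activeLower = (x[i] <= lo[i] + eps && g[i] > 0.0);
    bool activeUpper = (x[i] >= up[i] - eps && g[i] < 0.0);
    s[i] = (activeLower || activeUpper) ? -g[i]        // active: negative gradient
                                        : -g[i]/h[i];  // inactive: Newton step
  }
  return s;
}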
/** \brief Update step, if successful.

    Given a trial step, \f$s_k\f$, this function updates \f$x_{k+1}=x_k+s_k\f$.
    This function also updates the secant approximation.

    @param[in,out]  x           is the updated iterate
    @param[in]      s           is the computed trial step
    @param[in]      obj         is the objective function
    @param[in]      con         are the bound constraints
    @param[in]      algo_state  contains the current state of the algorithm
*/
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
             BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();

  // Update iterate
  algo_state.iter++;
  x.axpy(1.0, s);

  // Compute new gradient
  if ( edesc_ == DESCENT_SECANT ||
       (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    gp_->set(*(step_state->gradientVec));
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;

  // Update secant information
  if ( edesc_ == DESCENT_SECANT ||
       (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    secant_->update(*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);
  }

  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( con.isActivated() ) {
    if ( useProjectedGrad_ ) {
      gp_->set(*(step_state->gradientVec));
      con.computeProjectedGradient( *gp_, x );
      algo_state.gnorm = gp_->norm();
    }
    else {
      d_->set(x);
      d_->axpy(-1.0,(step_state->gradientVec)->dual());
      con.project(*d_);
      d_->axpy(-1.0,x);
      algo_state.gnorm = d_->norm();
    }
  }
  else {
    algo_state.gnorm = (step_state->gradientVec)->norm();
  }
}
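// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): secant_->update consumes the new and
// old gradients plus the step, i.e. the pair (s_k, y_k = g_{k+1} - g_k). One
// standard way such a secant applies H ~ (Hessian)^{-1} is the limited-memory
// BFGS two-loop recursion sketched below; names and storage are hypothetical.
// ---------------------------------------------------------------------------
#include <deque>
#include <vector>

struct SecantPair { std::vector<double> s, y; double rho; };  // rho = 1/(y.s)

std::vector<double> applyHSketch(std::vector<double> q,                // input: gradient
                                 const std::deque<SecantPair> &pairs,  // oldest ... newest
                                 double gamma) {                       // H0 = gamma*I
  std::vector<double> alpha(pairs.size());
  for (int i = static_cast<int>(pairs.size()) - 1; i >= 0; --i) {  // newest to oldest
    double a = 0.0;
    for (std::size_t j = 0; j < q.size(); ++j) a += pairs[i].s[j]*q[j];
    alpha[i] = pairs[i].rho*a;
    for (std::size_t j = 0; j < q.size(); ++j) q[j] -= alpha[i]*pairs[i].y[j];
  }
  for (std::size_t j = 0; j < q.size(); ++j) q[j] *= gamma;        // apply H0
  for (std::size_t i = 0; i < pairs.size(); ++i) {                 // oldest to newest
    double b = 0.0;
    for (std::size_t j = 0; j < q.size(); ++j) b += pairs[i].y[j]*q[j];
    b *= pairs[i].rho;
    for (std::size_t j = 0; j < q.size(); ++j) q[j] += (alpha[i] - b)*pairs[i].s[j];
  }
  return q;  // q = H*g
}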
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
             BoundConstraint<Real> &bnd, AlgorithmState<Real> &algo_state ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>()), one(1);
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();

  // Update iterate and store previous step
  algo_state.iter++;
  d_->set(x);
  x.plus(s);
  bnd.project(x);
  (step_state->descentVec)->set(x);
  (step_state->descentVec)->axpy(-one,*d_);
  algo_state.snorm = s.norm();

  // Compute new gradient
  gp_->set(*(step_state->gradientVec));
  obj.update(x,true,algo_state.iter);
  if ( computeObj_ ) {
    algo_state.value = obj.value(x,tol);
    algo_state.nfval++;
  }
  obj.gradient(*(step_state->gradientVec),x,tol);
  algo_state.ngrad++;

  // Update secant information
  secant_->updateStorage(x,*(step_state->gradientVec),*gp_,s,algo_state.snorm,algo_state.iter+1);

  // Update algorithm state
  (algo_state.iterateVec)->set(x);
  if ( useProjectedGrad_ ) {
    gp_->set(*(step_state->gradientVec));
    bnd.computeProjectedGradient( *gp_, x );
    algo_state.gnorm = gp_->norm();
  }
  else {
    d_->set(x);
    d_->axpy(-one,(step_state->gradientVec)->dual());
    bnd.project(*d_);
    d_->axpy(-one,x);
    algo_state.gnorm = d_->norm();
  }
}
Real GradDotStep(const Vector<Real> &g, const Vector<Real> &s,
                 const Vector<Real> &x,
                 BoundConstraint<Real> &bnd, Real eps = 0) {
  Real gs(0), one(1);
  if (!bnd.isActivated()) {
    gs = s.dot(g.dual());
  }
  else {
    d_->set(s);
    bnd.pruneActive(*d_,g,x,eps);
    gs = d_->dot(g.dual());
    d_->set(x);
    d_->axpy(-one,g.dual());
    bnd.project(*d_);
    d_->scale(-one);
    d_->plus(x);
    bnd.pruneInactive(*d_,g,x,eps);
    gs -= d_->dot(g.dual());
  }
  return gs;
}
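// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): GradDotStep combines two pieces,
//   gs = g_I . s_I  -  g_A . (x - P(x - g))_A,
// i.e. the plain inner product on the inactive set plus a projected-gradient
// contribution on the eps-active set. Componentwise version:
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>

double gradDotStepSketch(const std::vector<double> &g,
                         const std::vector<double> &s,
                         const std::vector<double> &x,
                         const std::vector<double> &lo,
                         const std::vector<double> &up,
                         double eps) {
  double gs = 0.0;
  for (std::size_t i = 0; i < g.size(); ++i) {
    bool active = (x[i] <= lo[i] + eps && g[i] > 0.0) ||
                  (x[i] >= up[i] - eps && g[i] < 0.0);
    if (!active) {
      gs += g[i]*s[i];                                          // inactive part
    }
    else {
      double p = std::min(std::max(x[i] - g[i], lo[i]), up[i]); // P(x - g)
      gs -= g[i]*(x[i] - p);                                    // active part
    }
  }
  return gs;
}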
void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
                 Objective<Real> &obj, BoundConstraint<Real> &con,
                 AlgorithmState<Real> &algo_state ) {
  Step<Real>::initialize(x,s,g,obj,con,algo_state);
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  lineSearch_->initialize(x,s,*(step_state->gradientVec),obj,con);
  if ( edesc_ == DESCENT_NEWTONKRYLOV || edesc_ == DESCENT_NEWTON || edesc_ == DESCENT_SECANT ) {
    Teuchos::RCP<Objective<Real> > obj_ptr = Teuchos::rcp(&obj, false);
    Teuchos::RCP<BoundConstraint<Real> > con_ptr = Teuchos::rcp(&con, false);
    hessian_ = Teuchos::rcp( new ProjectedHessian<Real>(secant_,obj_ptr,con_ptr,
                 algo_state.iterateVec,step_state->gradientVec,useSecantHessVec_) );
    precond_ = Teuchos::rcp( new ProjectedPreconditioner<Real>(secant_,obj_ptr,con_ptr,
                 algo_state.iterateVec,step_state->gradientVec,useSecantPrecond_) );
  }
  if ( con.isActivated() ) {
    d_ = s.clone();
  }
  if ( con.isActivated() || edesc_ == DESCENT_SECANT ||
       (edesc_ == DESCENT_NEWTONKRYLOV && useSecantPrecond_) ) {
    gp_ = g.clone();
  }
}
/** \brief Initialize step.

    This includes projecting the initial guess onto the constraints,
    computing the initial objective function value and gradient,
    and initializing the dual variables.

    @param[in,out]  x           is the initial guess
    @param[in]      obj         is the objective function
    @param[in]      con         are the bound constraints
    @param[in]      algo_state  is the current state of the algorithm
*/
void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
                 Objective<Real> &obj, BoundConstraint<Real> &con,
                 AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();

  // Initialize state descent direction and gradient storage
  step_state->descentVec  = s.clone();
  step_state->gradientVec = g.clone();
  step_state->searchSize  = 0.0;

  // Initialize additional storage
  xlam_ = x.clone();
  x0_   = x.clone();
  xbnd_ = x.clone();
  As_   = s.clone();
  xtmp_ = x.clone();
  res_  = g.clone();
  Ag_   = g.clone();
  rtmp_ = g.clone();
  gtmp_ = g.clone();

  // Project x onto constraint set
  con.project(x);

  // Update objective function, get value, and get gradient
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  obj.update(x,true,algo_state.iter);
  algo_state.value = obj.value(x,tol);
  algo_state.nfval++;
  algo_state.gnorm = computeCriticalityMeasure(x,obj,con,tol);
  algo_state.ngrad++;

  // Initialize dual variable
  lambda_ = s.clone();
  lambda_->set((step_state->gradientVec)->dual());
  lambda_->scale(-1.0);
  //con.setVectorToLowerBound(*lambda_);

  // Initialize Hessian and preconditioner
  Teuchos::RCP<Objective<Real> > obj_ptr = Teuchos::rcp(&obj, false);
  Teuchos::RCP<BoundConstraint<Real> > con_ptr = Teuchos::rcp(&con, false);
  hessian_ = Teuchos::rcp( new PrimalDualHessian<Real>(secant_,obj_ptr,con_ptr,
               algo_state.iterateVec,xlam_,useSecantHessVec_) );
  precond_ = Teuchos::rcp( new PrimalDualPreconditioner<Real>(secant_,obj_ptr,con_ptr,
               algo_state.iterateVec,xlam_,useSecantPrecond_) );
}
/** \brief Update step, if successful.

    This function returns \f$x_{k+1} = x_k + s_k\f$.
    It also updates secant information if being used.

    @param[in,out]  x           is the new iterate
    @param[in]      s           is the step computed via PDAS
    @param[in]      obj         is the objective function
    @param[in]      con         are the bound constraints
    @param[in]      algo_state  is the current state of the algorithm
*/
void update( Vector<Real> &x, const Vector<Real> &s, Objective<Real> &obj,
             BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  x.plus(s);
  feasible_ = con.isFeasible(x);
  algo_state.snorm = s.norm();
  algo_state.iter++;
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  obj.update(x,true,algo_state.iter);
  algo_state.value = obj.value(x,tol);
  algo_state.nfval++;
  if ( secant_ != Teuchos::null ) {
    gtmp_->set(*(step_state->gradientVec));
  }
  algo_state.gnorm = computeCriticalityMeasure(x,obj,con,tol);
  algo_state.ngrad++;
  if ( secant_ != Teuchos::null ) {
    secant_->update(*(step_state->gradientVec),*gtmp_,s,algo_state.snorm,algo_state.iter+1);
  }
  (algo_state.iterateVec)->set(x);
}
void initialize( Vector<Real> &x, const Vector<Real> &s, const Vector<Real> &g,
                 Objective<Real> &obj, BoundConstraint<Real> &bnd,
                 AlgorithmState<Real> &algo_state ) {
  d_ = x.clone();

  // Initialize unglobalized step
  Teuchos::ParameterList& list
    = parlist_.sublist("Step").sublist("Line Search").sublist("Descent Method");
  EDescent edesc = StringToEDescent(list.get("Type","Quasi-Newton Method"));
  if (bnd.isActivated()) {
    switch(edesc) {
      case DESCENT_STEEPEST:
        desc_ = Teuchos::rcp(new GradientStep<Real>(parlist_,computeObj_));
        break;
      case DESCENT_NONLINEARCG:
        desc_ = Teuchos::rcp(new NonlinearCGStep<Real>(parlist_,nlcg_,computeObj_));
        break;
      case DESCENT_SECANT:
        desc_ = Teuchos::rcp(new ProjectedSecantStep<Real>(parlist_,secant_,computeObj_));
        break;
      case DESCENT_NEWTON:
        desc_ = Teuchos::rcp(new ProjectedNewtonStep<Real>(parlist_,computeObj_));
        break;
      case DESCENT_NEWTONKRYLOV:
        desc_ = Teuchos::rcp(new ProjectedNewtonKrylovStep<Real>(parlist_,krylov_,secant_,computeObj_));
        break;
      default:
        TEUCHOS_TEST_FOR_EXCEPTION(true,std::invalid_argument,
          ">>> (LineSearchStep::Initialize): Undefined descent type!");
    }
  }
  else {
    switch(edesc) {
      case DESCENT_STEEPEST:
        desc_ = Teuchos::rcp(new GradientStep<Real>(parlist_,computeObj_));
        break;
      case DESCENT_NONLINEARCG:
        desc_ = Teuchos::rcp(new NonlinearCGStep<Real>(parlist_,nlcg_,computeObj_));
        break;
      case DESCENT_SECANT:
        desc_ = Teuchos::rcp(new SecantStep<Real>(parlist_,secant_,computeObj_));
        break;
      case DESCENT_NEWTON:
        desc_ = Teuchos::rcp(new NewtonStep<Real>(parlist_,computeObj_));
        break;
      case DESCENT_NEWTONKRYLOV:
        desc_ = Teuchos::rcp(new NewtonKrylovStep<Real>(parlist_,krylov_,secant_,computeObj_));
        break;
      default:
        TEUCHOS_TEST_FOR_EXCEPTION(true,std::invalid_argument,
          ">>> (LineSearchStep::Initialize): Undefined descent type!");
    }
  }
  desc_->initialize(x,s,g,obj,bnd,algo_state);

  // Initialize line search
  lineSearch_->initialize(x,s,g,obj,bnd);
  //Teuchos::RCP<const StepState<Real> > desc_state = desc_->getStepState();
  //lineSearch_->initialize(x,s,*(desc_state->gradientVec),obj,bnd);
}
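// ---------------------------------------------------------------------------
// Usage sketch: the descent type consumed above is read from the parameter
// list at "Step" -> "Line Search" -> "Descent Method" -> "Type" (default
// "Quasi-Newton Method"). A caller would configure it along these lines; the
// string "Newton-Krylov" is an assumption here, so check StringToEDescent for
// the exact accepted spellings.
// ---------------------------------------------------------------------------
Teuchos::ParameterList parlist;
parlist.sublist("Step").sublist("Line Search").sublist("Descent Method")
       .set("Type", "Newton-Krylov");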
/** \brief Compute step.

    Computes a trial step, \f$s_k\f$, as defined by the enum EDescent. Once the
    trial step is determined, this function determines an approximate minimizer
    of the 1D function \f$\phi_k(t) = f(x_k+ts_k)\f$. This approximate minimizer
    must satisfy sufficient decrease and curvature conditions.

    @param[out]  s           is the computed trial step
    @param[in]   x           is the current iterate
    @param[in]   obj         is the objective function
    @param[in]   con         are the bound constraints
    @param[in]   algo_state  contains the current state of the algorithm
*/
void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
              BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  Real tol = std::sqrt(ROL_EPSILON<Real>());

  // Set active set parameter
  Real eps = 0.0;
  if ( con.isActivated() ) {
    eps = algo_state.gnorm;
  }
  lineSearch_->setData(eps);
  if ( hessian_ != Teuchos::null ) {
    hessian_->setData(eps);
  }
  if ( precond_ != Teuchos::null ) {
    precond_->setData(eps);
  }

  // Compute step s
  switch(edesc_) {
    case DESCENT_NEWTONKRYLOV:
      flagKrylov_ = 0;
      krylov_->run(s,*hessian_,*(step_state->gradientVec),*precond_,iterKrylov_,flagKrylov_);
      break;
    case DESCENT_NEWTON:
    case DESCENT_SECANT:
      hessian_->applyInverse(s,*(step_state->gradientVec),tol);
      break;
    case DESCENT_NONLINEARCG:
      nlcg_->run(s,*(step_state->gradientVec),x,obj);
      break;
    case DESCENT_STEEPEST:
      s.set(step_state->gradientVec->dual());
      break;
    default:
      break;
  }

  // Compute g.dot(s)
  Real gs = 0.0;
  if ( !con.isActivated() ) {
    gs = -s.dot((step_state->gradientVec)->dual());
  }
  else {
    if ( edesc_ == DESCENT_STEEPEST ) {
      d_->set(x);
      d_->axpy(-1.0,s);
      con.project(*d_);
      d_->scale(-1.0);
      d_->plus(x);
      //d->set(s);
      //con.pruneActive(*d,s,x,eps);
      //con.pruneActive(*d,*(step_state->gradientVec),x,eps);
      gs = -d_->dot((step_state->gradientVec)->dual());
    }
    else {
      d_->set(s);
      con.pruneActive(*d_,*(step_state->gradientVec),x,eps);
      gs = -d_->dot((step_state->gradientVec)->dual());
      d_->set(x);
      d_->axpy(-1.0,(step_state->gradientVec)->dual());
      con.project(*d_);
      d_->scale(-1.0);
      d_->plus(x);
      con.pruneInactive(*d_,*(step_state->gradientVec),x,eps);
      gs -= d_->dot((step_state->gradientVec)->dual());
    }
  }

  // Check if s is a descent direction, i.e., g.dot(s) < 0
  if ( gs >= 0.0 || (flagKrylov_ == 2 && iterKrylov_ <= 1) ) {
    s.set((step_state->gradientVec)->dual());
    if ( con.isActivated() ) {
      d_->set(s);
      con.pruneActive(*d_,s,x);
      gs = -d_->dot((step_state->gradientVec)->dual());
    }
    else {
      gs = -s.dot((step_state->gradientVec)->dual());
    }
  }
  s.scale(-1.0);

  // Perform line search
  Real fnew = algo_state.value;
  ls_nfval_ = 0;
  ls_ngrad_ = 0;
  lineSearch_->run(step_state->searchSize,fnew,ls_nfval_,ls_ngrad_,gs,s,x,obj,con);

  // Make correction if maximum function evaluations reached
  if ( !acceptLastAlpha_ ) {
    lineSearch_->setMaxitUpdate(step_state->searchSize,fnew,algo_state.value);
  }
  algo_state.nfval += ls_nfval_;
  algo_state.ngrad += ls_ngrad_;

  // Compute scaled descent direction
  s.scale(step_state->searchSize);
  if ( con.isActivated() ) {
    s.plus(x);
    con.project(s);
    s.axpy(-1.0,x);
  }

  // Update step state information
  (step_state->descentVec)->set(s);

  // Update algorithm state information
  algo_state.snorm = s.norm();
  algo_state.value = fnew;
}
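// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): lineSearch_->run above hunts for a
// step size satisfying sufficient decrease along phi(t) = f(x + t*s), where
// gs = phi'(0) < 0. A minimal backtracking version of that search:
// ---------------------------------------------------------------------------
#include <functional>

double backtrackSketch(const std::function<double(double)> &phi,  // phi(t) = f(x + t*s)
                       double f0,                                 // phi(0)
                       double gs,                                 // phi'(0), must be < 0
                       double c1 = 1.e-4, double rho = 0.5, int maxit = 30) {
  double alpha = 1.0;
  for (int i = 0; i < maxit; ++i) {
    if ( phi(alpha) <= f0 + c1*alpha*gs ) break;  // Armijo condition holds
    alpha *= rho;                                 // otherwise shrink the step
  }
  return alpha;
}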
/** \brief Compute step.

    Given \f$x_k\f$, this function first builds the primal-dual active sets
    \f$\mathcal{A}_k^-\f$ and \f$\mathcal{A}_k^+\f$. Next, it uses CR to compute
    the inactive components of the step by solving
    \f[
        \nabla^2 f(x_k)_{\mathcal{I}_k,\mathcal{I}_k}(s_k)_{\mathcal{I}_k}
          = -\nabla f(x_k)_{\mathcal{I}_k}
            -\nabla^2 f(x_k)_{\mathcal{I}_k,\mathcal{A}_k}(s_k)_{\mathcal{A}_k}.
    \f]
    Finally, it updates the active components of the dual variables as
    \f[
        \lambda_{k+1} = -\nabla f(x_k)_{\mathcal{A}_k}
                        -(\nabla^2 f(x_k) s_k)_{\mathcal{A}_k}.
    \f]

    @param[out]  s           is the step computed via PDAS
    @param[in]   x           is the current iterate
    @param[in]   obj         is the objective function
    @param[in]   con         are the bound constraints
    @param[in]   algo_state  is the current state of the algorithm
*/
void compute( Vector<Real> &s, const Vector<Real> &x, Objective<Real> &obj,
              BoundConstraint<Real> &con, AlgorithmState<Real> &algo_state ) {
  Teuchos::RCP<StepState<Real> > step_state = Step<Real>::getState();
  s.zero();
  x0_->set(x);
  res_->set(*(step_state->gradientVec));
  for ( iter_ = 0; iter_ < maxit_; iter_++ ) {
    /********************************************************************/
    // MODIFY ITERATE VECTOR TO CHECK ACTIVE SET
    /********************************************************************/
    xlam_->set(*x0_);                            // xlam = x0
    xlam_->axpy(scale_,*lambda_);                // xlam = x0 + c*lambda
    /********************************************************************/
    // PROJECT x ONTO PRIMAL DUAL FEASIBLE SET
    /********************************************************************/
    As_->zero();                                 // As   = 0
    con.setVectorToUpperBound(*xbnd_);           // xbnd = u
    xbnd_->axpy(-1.0,x);                         // xbnd = u - x
    xtmp_->set(*xbnd_);                          // tmp  = u - x
    con.pruneUpperActive(*xtmp_,*xlam_,neps_);   // tmp  = I(u - x)
    xbnd_->axpy(-1.0,*xtmp_);                    // xbnd = A(u - x)
    As_->plus(*xbnd_);                           // As  += A(u - x)
    con.setVectorToLowerBound(*xbnd_);           // xbnd = l
    xbnd_->axpy(-1.0,x);                         // xbnd = l - x
    xtmp_->set(*xbnd_);                          // tmp  = l - x
    con.pruneLowerActive(*xtmp_,*xlam_,neps_);   // tmp  = I(l - x)
    xbnd_->axpy(-1.0,*xtmp_);                    // xbnd = A(l - x)
    As_->plus(*xbnd_);                           // As  += A(l - x)
    /********************************************************************/
    // APPLY HESSIAN TO ACTIVE COMPONENTS OF s AND REMOVE INACTIVE
    /********************************************************************/
    itol_ = std::sqrt(ROL_EPSILON<Real>());
    if ( useSecantHessVec_ && secant_ != Teuchos::null ) { // IHAs = H*As
      secant_->applyB(*gtmp_,*As_,x);
    }
    else {
      obj.hessVec(*gtmp_,*As_,x,itol_);
    }
    con.pruneActive(*gtmp_,*xlam_,neps_);        // IHAs = I(H*As)
    /********************************************************************/
    // SEPARATE ACTIVE AND INACTIVE COMPONENTS OF THE GRADIENT
    /********************************************************************/
    rtmp_->set(*(step_state->gradientVec));      // Inactive components
    con.pruneActive(*rtmp_,*xlam_,neps_);
    Ag_->set(*(step_state->gradientVec));        // Active components
    Ag_->axpy(-1.0,*rtmp_);
    /********************************************************************/
    // SOLVE REDUCED NEWTON SYSTEM
    /********************************************************************/
    rtmp_->plus(*gtmp_);
    rtmp_->scale(-1.0);                          // rhs = -Ig - I(H*As)
    s.zero();
    if ( rtmp_->norm() > 0.0 ) {
      //solve(s,*rtmp_,*xlam_,x,obj,con);
      // Call conjugate residuals
      krylov_->run(s,*hessian_,*rtmp_,*precond_,iterCR_,flagCR_);
      con.pruneActive(s,*xlam_,neps_);           // s <- Is
    }
    s.plus(*As_);                                // s  = Is + As
    /********************************************************************/
    // UPDATE MULTIPLIER
    /********************************************************************/
    if ( useSecantHessVec_ && secant_ != Teuchos::null ) {
      secant_->applyB(*rtmp_,s,x);
    }
    else {
      obj.hessVec(*rtmp_,s,x,itol_);
    }
    gtmp_->set(*rtmp_);
    con.pruneActive(*gtmp_,*xlam_,neps_);
    lambda_->set(*rtmp_);
    lambda_->axpy(-1.0,*gtmp_);
    lambda_->plus(*Ag_);
    lambda_->scale(-1.0);
    /********************************************************************/
    // UPDATE STEP
    /********************************************************************/
    x0_->set(x);
    x0_->plus(s);
    res_->set(*(step_state->gradientVec));
    res_->plus(*rtmp_);
    // Compute criticality measure
    xtmp_->set(*x0_);
    xtmp_->axpy(-1.0,res_->dual());
    con.project(*xtmp_);
    xtmp_->axpy(-1.0,*x0_);
    //std::cout << s.norm()        << " "
    //          << tmp->norm()     << " "
    //          << res_->norm()    << " "
    //          << lambda_->norm() << " "
    //          << flagCR_ << " " << iterCR_ << "\n";
    if ( xtmp_->norm() < gtol_*algo_state.gnorm ) {
      flag_ = 0;
      break;
    }
    if ( s.norm() < stol_*x.norm() ) {
      flag_ = 2;
      break;
    }
  }
  if ( iter_ == maxit_ ) {
    flag_ = 1;
  }
  else {
    iter_++;
  }
}
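// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): PDAS classifies components using the
// primal-dual quantity xlam = x + c*lambda,
//   A^- = { i : xlam_i < l_i },   A^+ = { i : xlam_i > u_i },
// fixes the active step components to the bounds (s_i = l_i - x_i or
// u_i - x_i), and solves the reduced Newton system only on the inactive set.
// Active-set identification on plain vectors:
// ---------------------------------------------------------------------------
#include <vector>

enum class SetLabel { Inactive, ActiveLower, ActiveUpper };

std::vector<SetLabel> pdasActiveSetsSketch(const std::vector<double> &x,
                                           const std::vector<double> &lambda,
                                           const std::vector<double> &lo,
                                           const std::vector<double> &up,
                                           double c) {
  std::vector<SetLabel> label(x.size(), SetLabel::Inactive);
  for (std::size_t i = 0; i < x.size(); ++i) {
    double xlam = x[i] + c*lambda[i];                         // primal-dual indicator
    if      (xlam < lo[i]) label[i] = SetLabel::ActiveLower;  // s_i = lo_i - x_i
    else if (xlam > up[i]) label[i] = SetLabel::ActiveUpper;  // s_i = up_i - x_i
  }
  return label;
}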
virtual bool status( const ELineSearch type, int &ls_neval, int &ls_ngrad,
                     const Real alpha, const Real fold, const Real sgold, const Real fnew,
                     const Vector<Real> &x, const Vector<Real> &s,
                     Objective<Real> &obj, BoundConstraint<Real> &con ) {
  Real tol = std::sqrt(ROL_EPSILON<Real>());

  // Check Armijo condition
  bool armijo = false;
  if ( con.isActivated() ) {
    Real gs = 0.0;
    if ( edesc_ == DESCENT_STEEPEST ) {
      updateIterate(*d_,x,s,alpha,con);
      d_->scale(-1.0);
      d_->plus(x);
      gs = -s.dot(*d_);
    }
    else {
      d_->set(s);
      d_->scale(-1.0);
      con.pruneActive(*d_,*(grad_),x,eps_);
      gs = alpha*(grad_)->dot(*d_);
      d_->zero();
      updateIterate(*d_,x,s,alpha,con);
      d_->scale(-1.0);
      d_->plus(x);
      con.pruneInactive(*d_,*(grad_),x,eps_);
      gs += d_->dot(grad_->dual());
    }
    if ( fnew <= fold - c1_*gs ) {
      armijo = true;
    }
  }
  else {
    if ( fnew <= fold + c1_*alpha*sgold ) {
      armijo = true;
    }
  }

  // Check maximum iteration count
  bool itcond = false;
  if ( ls_neval >= maxit_ ) {
    itcond = true;
  }

  // Check curvature condition
  bool curvcond = false;
  if ( armijo && ((type != LINESEARCH_BACKTRACKING && type != LINESEARCH_CUBICINTERP) ||
                  (edesc_ == DESCENT_NONLINEARCG)) ) {
    if (econd_ == CURVATURECONDITION_GOLDSTEIN) {
      if (fnew >= fold + (1.0-c1_)*alpha*sgold) {
        curvcond = true;
      }
    }
    else if (econd_ == CURVATURECONDITION_NULL) {
      curvcond = true;
    }
    else {
      updateIterate(*xtst_,x,s,alpha,con);
      obj.update(*xtst_);
      obj.gradient(*g_,*xtst_,tol);
      Real sgnew = 0.0;
      if ( con.isActivated() ) {
        d_->set(s);
        d_->scale(-alpha);
        con.pruneActive(*d_,s,x);
        sgnew = -d_->dot(g_->dual());
      }
      else {
        sgnew = s.dot(g_->dual());
      }
      ls_ngrad++;
      if ( ((econd_ == CURVATURECONDITION_WOLFE)
              && (sgnew >= c2_*sgold)) ||
           ((econd_ == CURVATURECONDITION_STRONGWOLFE)
              && (std::abs(sgnew) <= c2_*std::abs(sgold))) ||
           ((econd_ == CURVATURECONDITION_GENERALIZEDWOLFE)
              && (c2_*sgold <= sgnew && sgnew <= -c3_*sgold)) ||
           ((econd_ == CURVATURECONDITION_APPROXIMATEWOLFE)
              && (c2_*sgold <= sgnew && sgnew <= (2.0*c1_ - 1.0)*sgold)) ) {
        curvcond = true;
      }
    }
  }

  // Combine conditions based on line-search type
  if (type == LINESEARCH_BACKTRACKING || type == LINESEARCH_CUBICINTERP) {
    if (edesc_ == DESCENT_NONLINEARCG) {
      return ((armijo && curvcond) || itcond);
    }
    return (armijo || itcond);
  }
  return ((armijo && curvcond) || itcond);
}
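// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ROL): the unconstrained branches above are
// the classical acceptance tests for phi(t) = f(x + t*s) with sgold =
// phi'(0) < 0 and 0 < c1 < c2 < 1:
//   Armijo:        phi(alpha) <= phi(0) + c1*alpha*phi'(0)
//   Wolfe:         phi'(alpha) >= c2*phi'(0)
//   strong Wolfe:  |phi'(alpha)| <= c2*|phi'(0)|
// ---------------------------------------------------------------------------
#include <cmath>

bool wolfeSketch(double fold, double fnew, double sgold, double sgnew,
                 double alpha, double c1, double c2, bool strong) {
  bool armijo = (fnew <= fold + c1*alpha*sgold);              // sufficient decrease
  bool curv   = strong ? (std::abs(sgnew) <= c2*std::abs(sgold))
                       : (sgnew >= c2*sgold);                 // curvature condition
  return armijo && curv;
}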