bool DenseLinAlgPack::comp(const DMatrixSlice& gms1, BLAS_Cpp::Transp trans1
  , const DMatrixSlice& gms2, BLAS_Cpp::Transp trans2)
{
  // Compare op(gms1) and op(gms2) column by column (note: <= so the last column is included).
  for(size_type i = 1; i <= my_min(gms1.cols(),gms2.cols()); ++i)
    if( !comp( col(gms1,trans1,i), col(gms2,trans2,i) ) ) return false;
  return true;
}
void DenseLinAlgPack::M_diagVtM( DMatrixSlice* gms_lhs, const DVectorSlice& vs_rhs
  , const DMatrixSlice& gms_rhs, BLAS_Cpp::Transp trans_rhs )
{
  Mp_M_assert_sizes(gms_lhs->rows(), gms_lhs->cols(), BLAS_Cpp::no_trans
    , gms_rhs.rows(), gms_rhs.cols(), trans_rhs);
  for(DMatrixSlice::size_type j = 1; j <= gms_lhs->cols(); ++j)
    prod( &gms_lhs->col(j), vs_rhs, col(gms_rhs,trans_rhs,j) );
}
inline
/// Assert that a matrix is square; throws std::length_error if it is not (only active when LINALGPACK_CHECK_SLICE_SETUP is defined).
void assert_gms_square(const DMatrixSlice& gms) {
#ifdef LINALGPACK_CHECK_SLICE_SETUP
  if(gms.rows() != gms.cols())
    throw std::length_error("Matrix must be square");
#endif
}
void DenseLinAlgPack::Mp_StM(DMatrixSlice* gms_lhs, value_type alpha, const DMatrixSlice& gms_rhs
  , BLAS_Cpp::Transp trans_rhs)
{
  Mp_M_assert_sizes(gms_lhs->rows(), gms_lhs->cols(), BLAS_Cpp::no_trans
    , gms_rhs.rows(), gms_rhs.cols(), trans_rhs);
  for(DMatrixSlice::size_type j = 1; j <= gms_lhs->cols(); ++j)
    Vp_StV( &gms_lhs->col(j), alpha, col(gms_rhs,trans_rhs,j) );
}
void DenseLinAlgPack::M_StMtInvM(DMatrixSlice* gms_lhs, value_type alpha, const DMatrixSlice& gms_rhs1
  , BLAS_Cpp::Transp trans_rhs1, const DMatrixSliceTri& tri_rhs2
  , BLAS_Cpp::Transp trans_rhs2)
{
  Mp_MtM_assert_sizes( gms_lhs->rows(), gms_lhs->cols(), BLAS_Cpp::no_trans
    , gms_rhs1.rows(), gms_rhs1.cols(), trans_rhs1
    , tri_rhs2.gms().rows(), tri_rhs2.gms().cols(), trans_rhs2 );
  i_trsm_alt(BLAS_Cpp::right,alpha,tri_rhs2,trans_rhs2,gms_rhs1,trans_rhs1,gms_lhs);
}
void DenseLinAlgPack::M_StInvMtM(DMatrix* gm_lhs, value_type alpha, const DMatrixSliceTri& tri_rhs1
  , BLAS_Cpp::Transp trans_rhs1, const DMatrixSlice& gms_rhs2
  , BLAS_Cpp::Transp trans_rhs2)
{
  MtM_assert_sizes( tri_rhs1.gms().rows(), tri_rhs1.gms().cols(), trans_rhs1
    , gms_rhs2.rows(), gms_rhs2.cols(), trans_rhs2 );
  gm_lhs->resize(
      rows(tri_rhs1.gms().rows(), tri_rhs1.gms().cols(), trans_rhs1)
    , cols(gms_rhs2.rows(), gms_rhs2.cols(), trans_rhs2) );
  i_trsm_alt(BLAS_Cpp::left,alpha,tri_rhs1,trans_rhs1,gms_rhs2,trans_rhs2,&(*gm_lhs)());
}
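// Illustrative note (not part of the library): the i_trsm_alt() calls in
// M_StInvMtM() and M_StMtInvM() above ultimately reduce to a BLAS trsm-style
// triangular solve.  The following minimal standalone sketch (plain std::vector,
// column-major storage, hypothetical name lower_trsm) shows what such a solve
// does for a lower triangular, non-unit-diagonal L: it overwrites B with
// alpha*inv(L)*B by forward substitution.
#include <vector>
#include <cstddef>

inline void lower_trsm(std::size_t n, std::size_t nrhs, double alpha,
                       const std::vector<double>& L, std::vector<double>& B)
{
  for (std::size_t j = 0; j < nrhs; ++j) {      // one right-hand side at a time
    double* b = &B[j*n];
    for (std::size_t i = 0; i < n; ++i)
      b[i] *= alpha;                            // scale by alpha first
    for (std::size_t i = 0; i < n; ++i) {       // forward substitution
      b[i] /= L[i + i*n];                       // divide by the diagonal entry L(i,i)
      for (std::size_t k = i+1; k < n; ++k)
        b[k] -= L[k + i*n] * b[i];              // eliminate the entries below the diagonal
    }
  }
}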
inline
DMatrixSlice::DMatrixSlice( DMatrixSlice& gms, const Range1D& I
  , const Range1D& J)
  : ptr_( gms.col_ptr(1) + (I.lbound() - 1) + (J.lbound() - 1) * gms.max_rows() )
  , max_rows_(gms.max_rows())
  , rows_(I.size())
  , cols_(J.size())
{
  gms.validate_row_subscript(I.ubound());
  gms.validate_col_subscript(J.ubound());
}
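// Illustrative note (not part of the library): the member initializer for ptr_
// above is the standard column-major subview offset.  The 1-based element (i,j)
// of a matrix with leading dimension ld (here max_rows()) lives at
// base + (i-1) + (j-1)*ld, so a subview starting at (i0,j0) only needs its base
// pointer shifted.  A tiny standalone sketch with a hypothetical helper:
#include <cstddef>

inline double* subview_ptr(double* base, std::size_t ld, std::size_t i0, std::size_t j0)
{
  // points at element (i0,j0) of the parent, which becomes element (1,1) of the subview
  return base + (i0 - 1) + (j0 - 1) * ld;
}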
void DenseLinAlgPack::syr2k(BLAS_Cpp::Transp trans,value_type alpha, const DMatrixSlice& gms_rhs1
  , const DMatrixSlice& gms_rhs2, value_type beta, DMatrixSliceSym* sym_lhs)
{
  Mp_MtM_assert_sizes( sym_lhs->gms().rows(), sym_lhs->gms().cols(), BLAS_Cpp::no_trans
    , gms_rhs1.rows(), gms_rhs1.cols(), trans
    , gms_rhs2.rows(), gms_rhs2.cols(), trans_not(trans) );
  BLAS_Cpp::syr2k(sym_lhs->uplo(),trans,sym_lhs->gms().rows()
    ,cols(gms_rhs1.rows(), gms_rhs1.cols(), trans),alpha
    ,gms_rhs1.col_ptr(1),gms_rhs1.max_rows()
    ,gms_rhs2.col_ptr(1),gms_rhs2.max_rows(),beta
    ,sym_lhs->gms().col_ptr(1),sym_lhs->gms().max_rows() );
}
void DenseLinAlgPack::Mp_StMtM(DMatrixSlice* gms_lhs, value_type alpha, const DMatrixSlice& gms_rhs1
  , BLAS_Cpp::Transp trans_rhs1, const DMatrixSlice& gms_rhs2
  , BLAS_Cpp::Transp trans_rhs2, value_type beta)
{
  Mp_MtM_assert_sizes( gms_lhs->rows(), gms_lhs->cols(), BLAS_Cpp::no_trans
    , gms_rhs1.rows(), gms_rhs1.cols(), trans_rhs1
    , gms_rhs2.rows(), gms_rhs2.cols(), trans_rhs2);
  BLAS_Cpp::gemm(trans_rhs1,trans_rhs2,gms_lhs->rows(),gms_lhs->cols()
    ,cols(gms_rhs1.rows(),gms_rhs1.cols(),trans_rhs1)
    ,alpha,gms_rhs1.col_ptr(1),gms_rhs1.max_rows()
    ,gms_rhs2.col_ptr(1),gms_rhs2.max_rows()
    ,beta,gms_lhs->col_ptr(1),gms_lhs->max_rows() );
}
// gm_lhs = op(gms_rhs)
void DenseLinAlgPack::assign(DMatrix* gm_lhs, const DMatrixSlice& gms_rhs, BLAS_Cpp::Transp trans_rhs)
{
  if(gm_lhs->overlap(gms_rhs) == SAME_MEM && trans_rhs == BLAS_Cpp::no_trans)
    return; // assignment to self
  if(gm_lhs->overlap(gms_rhs) != NO_OVERLAP) {
    // some overlap so we must create a copy
    DMatrix tmp(gms_rhs);
    resize_gm_lhs(gm_lhs,gms_rhs.rows(),gms_rhs.cols(),trans_rhs);
    i_assign(&(*gm_lhs)(), tmp(), trans_rhs);
  }
  else {
    // no overlap so just assign
    resize_gm_lhs(gm_lhs,gms_rhs.rows(),gms_rhs.cols(),trans_rhs);
    i_assign(&(*gm_lhs)(), gms_rhs, trans_rhs);
  }
}
void DenseLinAlgPack::Mp_StMtM(DMatrixSlice* gms_lhs, value_type alpha, const DMatrixSlice& gms_rhs1
  , BLAS_Cpp::Transp trans_rhs1, const DMatrixSliceSym& sym_rhs2
  , BLAS_Cpp::Transp trans_rhs2, value_type beta)
{
  Mp_MtM_assert_sizes( gms_lhs->rows(), gms_lhs->cols(), BLAS_Cpp::no_trans
    , gms_rhs1.rows(), gms_rhs1.cols(), trans_rhs1
    , sym_rhs2.gms().rows(), sym_rhs2.gms().cols(), trans_rhs2 );
  if(trans_rhs1 == BLAS_Cpp::no_trans) {
    i_symm(BLAS_Cpp::right,alpha,sym_rhs2,gms_rhs1,beta,gms_lhs);
  }
  else {
    // must make a temporary copy to call the BLAS
    DMatrix tmp;
    assign(&tmp,gms_rhs1,trans_rhs1);
    i_symm(BLAS_Cpp::right,alpha,sym_rhs2,tmp(),beta,gms_lhs);
  }
}
inline
/** \brief . */
/*
 * Utility to check if a lhs matrix slice is the same size as a rhs matrix slice.
 *
 * A DMatrixSlice cannot be resized since the rows_ property of the
 * DMatrix it came from will not be updated.  Allowing a DMatrixSlice
 * to resize from unsized would require that the DMatrixSlice carry
 * a reference to the DMatrix it was created from.  If this is needed
 * then it will be added.
 */
void assert_gms_lhs(const DMatrixSlice& gms_lhs, size_type rows, size_type cols
  , BLAS_Cpp::Transp trans_rhs = BLAS_Cpp::no_trans)
{
  if(trans_rhs == BLAS_Cpp::trans)
    std::swap(rows,cols);
  if(gms_lhs.rows() == rows && gms_lhs.cols() == cols)
    return; // same size
  // not the same size so this is an error
  throw std::length_error("assert_gms_lhs(...): lhs DMatrixSlice dim does not match rhs dim");
}
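// Illustrative note (not part of the library): the (rows,cols) swap above is
// just the dimension rule for op(M): op(M) is rows x cols when not transposed
// and cols x rows when transposed.  A minimal sketch of that rule with a
// hypothetical helper (the library's own rows()/cols() free functions play this
// role):
#include <cstddef>
#include <utility>

inline std::pair<std::size_t,std::size_t>
op_dims(std::size_t rows, std::size_t cols, bool transposed)
{
  return transposed ? std::make_pair(cols, rows) : std::make_pair(rows, cols);
}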
void DenseLinAlgPack::delete_row_col( size_type kd, DMatrixSliceTriEle* tri_M )
{
  // Validate input
  TEUCHOS_TEST_FOR_EXCEPT( !( tri_M ) );
  TEUCHOS_TEST_FOR_EXCEPT( !( tri_M->rows() ) );
  TEUCHOS_TEST_FOR_EXCEPT( !( 1 <= kd && kd <= tri_M->rows() ) );
  DMatrixSlice M = tri_M->gms();
  const size_type n = M.rows();
  if( tri_M->uplo() == BLAS_Cpp::lower ) {
    // Move M31 up one row at a time
    if( 1 < kd && kd < n ) {
      Range1D rng(1,kd-1);
      for( size_type i = kd; i < n; ++i )
        M.row(i)(rng) = M.row(i+1)(rng);
    }
    // Move M33 up and to the left one column at a time
    if( kd < n ) {
      for( size_type i = kd; i < n; ++i )
        M.col(i)(i,n-1) = M.col(i+1)(i+1,n);
    }
  }
  else if( tri_M->uplo() == BLAS_Cpp::upper ) {
    // Move M13 left one column at a time.
    if( 1 < kd && kd < n ) {
      Range1D rng(1,kd-1);
      for( size_type j = kd; j < n; ++j )
        M.col(j)(rng) = M.col(j+1)(rng);
    }
    // Move the updated U33 up and left one column at a time.
    if( kd < n ) {
      for( size_type j = kd; j < n; ++j )
        M.col(j)(kd,j) = M.col(j+1)(kd+1,j+1);
    }
  }
  else {
    TEUCHOS_TEST_FOR_EXCEPT(true); // Invalid input
  }
}
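// Illustrative note (not part of the library): delete_row_col() above performs
// the usual "delete row/column k by shifting the trailing rows and columns
// forward" shuffle, restricted to the stored triangle.  The same operation on a
// plain full-storage, column-major n x n matrix looks like the following
// standalone sketch (hypothetical name delete_row_col_full); afterwards the
// leading (n-1) x (n-1) block of the buffer holds the reduced matrix.
#include <vector>
#include <cstddef>

inline void delete_row_col_full(std::size_t n, std::size_t k, std::vector<double>& A)
{
  // Shift columns k+1..n (1-based) one place to the left.
  for (std::size_t j = k; j < n; ++j)
    for (std::size_t i = 0; i < n; ++i)
      A[i + (j-1)*n] = A[i + j*n];
  // Shift rows k+1..n one place up (only the first n-1 columns still matter).
  for (std::size_t i = k; i < n; ++i)
    for (std::size_t j = 0; j + 1 < n; ++j)
      A[(i-1) + j*n] = A[i + j*n];
}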
bool DenseLinAlgPack::assert_print_nan_inf( const DMatrixSlice& m
  , const std::string & name, bool throw_excpt, std::ostream* out )
{
  bool has_nan_or_inf = false;
  bool printed_header = false;
  for( size_type j = 1; j <= m.cols(); ++j ) {
    const DVectorSlice& v = m.col(j);
    for( DVectorSlice::const_iterator v_itr = v.begin(); v_itr != v.end(); ++v_itr ) {
      if( RTOp_is_nan_inf(*v_itr) ) {
        if(out) {
          if(!printed_header) {
            *out << "The matrix \"" << name << "\" has the following NaN or Inf entries\n";
            printed_header = true;
          }
          *out << name << "(" << v_itr - v.begin() + 1 << "," << j << ") = " << *v_itr << std::endl;
        }
        has_nan_or_inf = true;
      }
    }
  }
  if( has_nan_or_inf && throw_excpt ) {
    if(out)
      out->flush();
    std::ostringstream omsg;
    omsg
      << "assert_print_nan_inf(...) : Error, the matrix named " << name
      << " has at least one element which is NaN or Inf";
    throw NaNInfException( omsg.str() );
  }
  return has_nan_or_inf;
}
void DenseLinAlgPack::Vp_StMtV(DVectorSlice* vs_lhs, value_type alpha, const DMatrixSlice& gms_rhs1
  , BLAS_Cpp::Transp trans_rhs1, const DVectorSlice& vs_rhs2, value_type beta)
{
  Vp_MtV_assert_sizes(vs_lhs->dim(), gms_rhs1.rows(), gms_rhs1.cols(), trans_rhs1
    , vs_rhs2.dim());
  BLAS_Cpp::gemv(trans_rhs1,gms_rhs1.rows(),gms_rhs1.cols(),alpha,gms_rhs1.col_ptr(1)
    ,gms_rhs1.max_rows(), vs_rhs2.raw_ptr(),vs_rhs2.stride(),beta,vs_lhs->raw_ptr()
    ,vs_lhs->stride());
}
void MatrixSymPosDefLBFGS::update_Q() const
{
  using DenseLinAlgPack::tri;
  using DenseLinAlgPack::tri_ele;
  using DenseLinAlgPack::Mp_StM;
  //
  // We need to update the factorizations to solve for:
  //
  //   x = inv(Q) * y
  //
  //   [ y1 ]   [ (1/gk)*S'S    L  ]   [ x1 ]
  //   [ y2 ] = [      L'      -D  ] * [ x2 ]
  //
  // We will solve the above system using the Schur complement:
  //
  //   C = (1/gk)*S'S + L*inv(D)*L'
  //
  // According to the referenced paper, C is p.d. so:
  //
  //   C = J*J'
  //
  // We then compute the solution as:
  //
  //   x1 =  inv(C) * ( y1 + L*inv(D)*y2 )
  //   x2 = -inv(D) * ( y2 - L'*x1 )
  //
  // Therefore we will just update the factorization C = J*J'.
  //
  // Form the upper triangular part of C, which will become J.
  // We use the storage of QJ_ for this.
  if( QJ_.rows() < m_ )
    QJ_.resize( m_, m_ );
  const size_type mb = m_bar_;
  DMatrixSlice C = QJ_(1,mb,1,mb);
  // C = L * inv(D) * L'
  //
  // Here L is a strictly lower triangular (zero diagonal) matrix where:
  //
  //   L = [  0   0 ]
  //       [ Lb   0 ]
  //
  // Lb is lower triangular (nonzero diagonal).
  //
  // Therefore we compute L*inv(D)*L' as:
  //
  //   C = [  0   0 ] * [ Db   0 ] * [ 0  Lb' ]
  //       [ Lb   0 ]   [  0  db ]   [ 0   0  ]
  //
  //     = [ 0   0  ] = [ 0      0     ]
  //       [ 0   Cb ]   [ 0  Lb*Db*Lb' ]
  //
  // We need to compute the upper triangular part of Cb.
  C.row(1) = 0.0;
  if( mb > 1 )
    comp_Cb( STY_(2,mb,1,mb-1), STY_.diag(0)(1,mb-1), &C(2,mb,2,mb) );
  // C += (1/gk)*S'S
  const DMatrixSliceSym &STS = this->STS();
  Mp_StM( &C, (1/gamma_k_), tri( STS.gms(), STS.uplo(), BLAS_Cpp::nonunit )
    , BLAS_Cpp::trans );
  // Now perform a Cholesky factorization of C.
  // After this factorization the upper triangular part of QJ_
  // (through C) will contain the Cholesky factor.
  DMatrixSliceTriEle C_upper = tri_ele( C, BLAS_Cpp::upper );
  try {
    DenseLinAlgLAPack::potrf( &C_upper );
  }
  catch( const DenseLinAlgLAPack::FactorizationException &fe ) {
    TEUCHOS_TEST_FOR_EXCEPTION(
      true, UpdateFailedException
      ,"Error, the factorization of Q which should be s.p.d. failed with"
      " the error message: {" << fe.what() << "}" );
  }
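// Illustrative note (not part of the library): the block elimination described
// in the comments of update_Q() is easiest to see with 1x1 blocks.  The sketch
// below (hypothetical name solve_2x2_schur) solves
//
//   [ a   l ] [x1]   [y1]
//   [ l  -d ] [x2] = [y2]
//
// with the Schur complement c = a + l*(1/d)*l, mirroring the formulas
// x1 = inv(C)*(y1 + L*inv(D)*y2) and x2 = -inv(D)*(y2 - L'*x1) above.  The real
// code factors C = J*J' with potrf() instead of dividing by a scalar.
#include <cassert>

inline void solve_2x2_schur(double a, double l, double d, double y1, double y2,
                            double* x1, double* x2)
{
  assert(d > 0.0);
  const double c = a + l * (1.0/d) * l;   // Schur complement (assumed > 0, as C is s.p.d.)
  assert(c > 0.0);
  *x1 =  (y1 + l * (1.0/d) * y2) / c;
  *x2 = -(y2 - l * (*x1)) / d;
}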
bool DenseLinAlgPack::comp(const DMatrixSlice& gms, value_type alpha)
{
  // Compare every column against alpha (note: <= so the last column is included).
  for(size_type i = 1; i <= gms.cols(); ++i)
    if( !comp( gms.col(i), alpha ) ) return false;
  return true;
}
inline
/** \brief . */
DVectorSlice row(DMatrixSlice& gms, BLAS_Cpp::Transp trans, size_type i)
{
  return (trans == BLAS_Cpp::no_trans) ? gms.row(i) : gms.col(i);
}
inline
/** \brief . */
const DVectorSlice col(const DMatrixSlice& gms, BLAS_Cpp::Transp trans, size_type j)
{
  return (trans == BLAS_Cpp::no_trans) ? gms.col(j) : gms.row(j);
}
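// Illustrative note (not part of the library): row()/col() above are what let
// the column-wise loops earlier in this section (Mp_StM(), M_diagVtM(), comp())
// treat op(M) uniformly: column j of op(M) is column j of M when not transposed
// and row j of M when transposed.  A standalone sketch of the same idea on raw
// column-major storage, with a hypothetical POD view type (the library's
// DVectorSlice carries exactly this pointer/length/stride triple):
#include <cstddef>

struct VecView { double* ptr; std::size_t len; std::size_t stride; };

inline VecView op_col(double* M, std::size_t rows, std::size_t cols, std::size_t ld,
                      bool transposed, std::size_t j)
{
  if (!transposed)
    return VecView{ M + (j-1)*ld, rows, 1 };   // column j of M (contiguous)
  return VecView{ M + (j-1), cols, ld };       // row j of M, viewed as a strided vector
}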
inline
DMatrix::DMatrix(const DMatrixSlice& gms)
  : v_(gms.rows() * gms.cols()), rows_(gms.rows())
{
  assign(this, gms, BLAS_Cpp::no_trans);
}
// gms_lhs = op(gms_rhs)
void DenseLinAlgPack::assign(DMatrixSlice* gms_lhs, const DMatrixSlice& gms_rhs, BLAS_Cpp::Transp trans_rhs)
{
  assert_gms_lhs(*gms_lhs,gms_rhs.rows(),gms_rhs.cols(),trans_rhs);
  i_assign(gms_lhs, gms_rhs, trans_rhs);
}
bool ReducedHessianSecantUpdateLPBFGS_Strategy::perform_update(
  DVectorSlice* s_bfgs, DVectorSlice* y_bfgs, bool first_update
  ,std::ostream& out, EJournalOutputLevel olevel, NLPAlgo *algo, NLPAlgoState *s
  ,MatrixOp *rHL_k
  )
{
  using std::setw;
  using std::endl;
  using std::right;
  using Teuchos::dyn_cast;
  namespace rcp = MemMngPack;
  using Teuchos::RCP;
  using LinAlgOpPack::V_MtV;
  using DenseLinAlgPack::dot;
  using AbstractLinAlgPack::norm_inf;
  using AbstractLinAlgPack::transVtMtV;
  typedef ConstrainedOptPack::MatrixHessianSuperBasic MHSB_t;
  using Teuchos::Workspace;
  Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get();

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\n*** (LPBFGS) Full limited memory BFGS to projected BFGS ...\n";
  }

#ifdef _WINDOWS
  MHSB_t &rHL_super = dynamic_cast<MHSB_t&>(*rHL_k);
#else
  MHSB_t &rHL_super = dyn_cast<MHSB_t>(*rHL_k);
#endif

  const size_type
    n    = algo->nlp().n(),
    r    = algo->nlp().r(),
    n_pz = n-r;

  bool do_projected_rHL_RR = false;

  // See if we still have a limited memory BFGS update matrix
  RCP<MatrixSymPosDefLBFGS> // We don't want this to be deleted until we are done with it
    lbfgs_rHL_RR = Teuchos::rcp_const_cast<MatrixSymPosDefLBFGS>(
      Teuchos::rcp_dynamic_cast<const MatrixSymPosDefLBFGS>(rHL_super.B_RR_ptr()) );

  if( lbfgs_rHL_RR.get() && rHL_super.Q_R().is_identity() ) {
    //
    // We have a limited memory BFGS matrix and have not started projected BFGS updating
    // yet so let's determine if it is time to consider switching.
    //
    // Check that the current update is sufficiently p.d. before we do anything
    const value_type
      sTy = dot(*s_bfgs,*y_bfgs),
      yTy = dot(*y_bfgs,*y_bfgs);
    if( !ConstrainedOptPack::BFGS_sTy_suff_p_d(
          *s_bfgs,*y_bfgs,&sTy
          , int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL )
      && !proj_bfgs_updater().bfgs_update().use_dampening() )
    {
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << "\nWarning!  use_dampening == false so there is no way we can perform any"
          " kind of BFGS update (projected or not) so we will skip it!\n";
      }
      quasi_newton_stats_(*s).set_k(0).set_updated_stats(
        QuasiNewtonStats::INDEF_SKIPED );
      return true;
    }
    // Consider if we can even look at the active set yet.
    const bool consider_switch
      = lbfgs_rHL_RR->num_secant_updates() >= min_num_updates_proj_start();
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nnum_previous_updates = " << lbfgs_rHL_RR->num_secant_updates()
        << ( consider_switch ? " >= " : " < " )
        << "min_num_updates_proj_start = " << min_num_updates_proj_start()
        << ( consider_switch
           ? "\nConsidering if we should switch to projected BFGS updating of superbasics ...\n"
           : "\nNot time to consider switching to projected BFGS updating of superbasics yet!" );
    }
    if( consider_switch ) {
      //
      // Our job here is to determine if it is time to switch to projected
      // BFGS updating.
      //
      if( act_set_stats_(*s).updated_k(-1) ) {
        if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
          out << "\nDetermining if projected BFGS updating of superbasics should begin ...\n";
        }
        // Determine if the active set has calmed down enough
        const SpVector
          &nu_km1 = s->nu().get_k(-1);
        const SpVectorSlice
          nu_indep = nu_km1(s->var_indep());
        const bool
          act_set_calmed_down
            = PBFGSPack::act_set_calmed_down(
                act_set_stats_(*s).get_k(-1)
                ,proj_bfgs_updater().act_set_frac_proj_start()
                ,olevel,out ),
          max_num_updates_exceeded
            = lbfgs_rHL_RR->m_bar() >= max_num_updates_proj_start();
        do_projected_rHL_RR = act_set_calmed_down || max_num_updates_exceeded;
        if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
          if( act_set_calmed_down ) {
            out << "\nThe active set has calmed down enough so let's further consider switching to\n"
              << "projected BFGS updating of superbasic variables ...\n";
          }
          else if( max_num_updates_exceeded ) {
            out << "\nThe active set has not calmed down enough but num_previous_updates = "
              << lbfgs_rHL_RR->m_bar() << " >= max_num_updates_proj_start = "
              << max_num_updates_proj_start()
              << "\nso we will further consider switching to projected BFGS updating of superbasic variables ...\n";
          }
          else {
            out << "\nIt is not time to switch to projected BFGS so just keep performing full limited memory BFGS for now ...\n";
          }
        }
        if( do_projected_rHL_RR ) {
          //
          // Determine the set of initially fixed and free independent variables.
          //
          typedef Workspace<size_type>                    i_x_t;
          typedef Workspace<ConstrainedOptPack::EBounds>  bnd_t;
          i_x_t   i_x_free(wss,n_pz);
          i_x_t   i_x_fixed(wss,n_pz);
          bnd_t   bnd_fixed(wss,n_pz);
          i_x_t   l_x_fixed_sorted(wss,n_pz);
          size_type n_pz_X = 0, n_pz_R = 0;
          // rHL = rHL_scale * I
          value_type rHL_scale = sTy > 0.0 ? yTy/sTy : 1.0;
          if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
            out << "\nScaling for diagonal initial rHL = rHL_scale*I, rHL_scale = "
              << rHL_scale << std::endl;
          }
          value_type
            sRTBRRsR = 0.0, sRTyR = 0.0, sXTBXXsX = 0.0, sXTyX = 0.0;
          // Get the elements in i_x_free[] for variables that are definitely free
          // and initialize s_R'*B_RR*s_R and s_R'*y_R
          PBFGSPack::init_i_x_free_sRTsR_sRTyR(
            nu_indep, *s_bfgs, *y_bfgs
            , &n_pz_R, &i_x_free[0], &sRTBRRsR, &sRTyR );
          sRTBRRsR *= rHL_scale;
          Workspace<value_type> rHL_XX_diag_ws(wss,nu_indep.nz());
          DVectorSlice rHL_XX_diag(&rHL_XX_diag_ws[0],rHL_XX_diag_ws.size());
          rHL_XX_diag = rHL_scale;
          // Sort fixed variables according to
          //   |s_X(i)^2*B_XX(i,i)|/|sRTBRRsR| + |s_X(i)*y_X(i)|/|sRTyR|
          // and initialize s_X'*B_XX*s_X and s_X'*y_X
          PBFGSPack::sort_fixed_max_cond_viol(
            nu_indep,*s_bfgs,*y_bfgs,rHL_XX_diag,sRTBRRsR,sRTyR
            ,&sXTBXXsX,&sXTyX,&l_x_fixed_sorted[0]);
          // Pick initial set of i_x_free[] and i_x_fixed[] (sorted!)
          PBFGSPack::choose_fixed_free(
            proj_bfgs_updater().project_error_tol()
            ,proj_bfgs_updater().super_basic_mult_drop_tol(),nu_indep
            ,*s_bfgs,*y_bfgs,rHL_XX_diag,&l_x_fixed_sorted[0]
            ,olevel,out,&sRTBRRsR,&sRTyR,&sXTBXXsX,&sXTyX
            ,&n_pz_X,&n_pz_R,&i_x_free[0],&i_x_fixed[0],&bnd_fixed[0] );
          if( n_pz_R < n_pz ) {
            //
            // We are ready to transition to projected BFGS updating!
            //
            // Determine if we will be using dense or limited memory BFGS.
            const bool
              low_num_super_basics = n_pz_R <= num_superbasics_switch_dense();
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_BASIC_ALGORITHM_INFO) ) {
              out << "\nSwitching to projected BFGS updating ..."
                << "\nn_pz_R = " << n_pz_R
                << ( low_num_super_basics ? " <= " : " > " )
                << " num_superbasics_switch_dense = " << num_superbasics_switch_dense()
                << ( low_num_super_basics
                   ? "\nThere are not too many superbasic variables so use dense projected BFGS ...\n"
                   : "\nThere are too many superbasic variables so use limited memory projected BFGS ...\n" );
            }
            // Create new matrix to use for rHL_RR initialized to rHL_RR = rHL_scale*I
            RCP<MatrixSymSecant>
              rHL_RR = Teuchos::null;
            if( low_num_super_basics ) {
              rHL_RR = Teuchos::rcp(
                new MatrixSymPosDefCholFactor(
                  NULL    // Let it allocate its own memory
                  ,NULL   // ...
                  ,n_pz   // maximum size
                  ,lbfgs_rHL_RR->maintain_original()
                  ,lbfgs_rHL_RR->maintain_inverse()
                  ) );
            }
            else {
              rHL_RR = Teuchos::rcp(
                new MatrixSymPosDefLBFGS(
                  n_pz, lbfgs_rHL_RR->m()
                  ,lbfgs_rHL_RR->maintain_original()
                  ,lbfgs_rHL_RR->maintain_inverse()
                  ,lbfgs_rHL_RR->auto_rescaling()
                  ) );
            }
            rHL_RR->init_identity( n_pz_R, rHL_scale );
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
              out << "\nrHL_RR after initialized to rHL_RR = rHL_scale*I but before performing current BFGS update:\nrHL_RR =\n"
                << dynamic_cast<MatrixOp&>(*rHL_RR); // I know this is okay!
            }
            // Initialize rHL_XX = rHL_scale*I
#ifdef _WINDOWS
            MatrixSymInitDiag
              &rHL_XX = dynamic_cast<MatrixSymInitDiag&>(
                const_cast<MatrixSymOp&>(*rHL_super.B_XX_ptr()));
#else
            MatrixSymInitDiag
              &rHL_XX = dyn_cast<MatrixSymInitDiag>(
                const_cast<MatrixSymOp&>(*rHL_super.B_XX_ptr()));
#endif
            rHL_XX.init_identity( n_pz_X, rHL_scale );
            // Reinitialize rHL
            rHL_super.initialize(
              n_pz, n_pz_R, &i_x_free[0], &i_x_fixed[0], &bnd_fixed[0]
              ,Teuchos::rcp_const_cast<const MatrixSymWithOpFactorized>(
                Teuchos::rcp_dynamic_cast<MatrixSymWithOpFactorized>(rHL_RR))
              ,NULL,BLAS_Cpp::no_trans,rHL_super.B_XX_ptr()
              );
            //
            // Perform the current BFGS update first
            //
            MatrixSymOp
              &rHL_RR_op = dynamic_cast<MatrixSymOp&>(*rHL_RR);
            const GenPermMatrixSlice
              &Q_R = rHL_super.Q_R(),
              &Q_X = rHL_super.Q_X();
            // Get projected BFGS update vectors y_bfgs_R, s_bfgs_R
            Workspace<value_type>
              y_bfgs_R_ws(wss,Q_R.cols()),
              s_bfgs_R_ws(wss,Q_R.cols()),
              y_bfgs_X_ws(wss,Q_X.cols()),
              s_bfgs_X_ws(wss,Q_X.cols());
            DVectorSlice y_bfgs_R(&y_bfgs_R_ws[0],y_bfgs_R_ws.size());
            DVectorSlice s_bfgs_R(&s_bfgs_R_ws[0],s_bfgs_R_ws.size());
            DVectorSlice y_bfgs_X(&y_bfgs_X_ws[0],y_bfgs_X_ws.size());
            DVectorSlice s_bfgs_X(&s_bfgs_X_ws[0],s_bfgs_X_ws.size());
            V_MtV( &y_bfgs_R, Q_R, BLAS_Cpp::trans, *y_bfgs );  // y_bfgs_R = Q_R'*y_bfgs
            V_MtV( &s_bfgs_R, Q_R, BLAS_Cpp::trans, *s_bfgs );  // s_bfgs_R = Q_R'*s_bfgs
            V_MtV( &y_bfgs_X, Q_X, BLAS_Cpp::trans, *y_bfgs );  // y_bfgs_X = Q_X'*y_bfgs
            V_MtV( &s_bfgs_X, Q_X, BLAS_Cpp::trans, *s_bfgs );  // s_bfgs_X = Q_X'*s_bfgs
            // Update rHL_RR
            if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
              out << "\nPerform current BFGS update on " << n_pz_R << " x " << n_pz_R << " projected "
                << "reduced Hessian for the superbasic variables where B = rHL_RR ...\n";
            }
            QuasiNewtonStats quasi_newton_stats;
            proj_bfgs_updater().bfgs_update().perform_update(
              &s_bfgs_R(),&y_bfgs_R(),false,out,olevel,algo->algo_cntr().check_results()
              ,&rHL_RR_op, &quasi_newton_stats );
            // Perform previous updates if possible
            if( lbfgs_rHL_RR->m_bar() ) {
              const size_type num_add_updates
                = std::min<size_type>(num_add_recent_updates(),lbfgs_rHL_RR->m_bar());
              if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
                out << "\nAdd the min(num_previous_updates,num_add_recent_updates) = min("
                  << lbfgs_rHL_RR->m_bar() << "," << num_add_recent_updates() << ") = " << num_add_updates
                  << " most recent BFGS updates if possible ...\n";
              }
              // Now add previous update vectors
              const value_type
                project_error_tol = proj_bfgs_updater().project_error_tol();
              const DMatrixSlice
                S = lbfgs_rHL_RR->S(),
                Y = lbfgs_rHL_RR->Y();
              size_type k = lbfgs_rHL_RR->k_bar();  // Location in S and Y of most recent update vectors
              for( size_type l = 1; l <= num_add_updates; ++l, --k ) {
                if(k == 0) k = lbfgs_rHL_RR->m_bar();  // see MatrixSymPosDefLBFGS
                // Check to see if this update satisfies the required conditions.
                // Start with the condition on s'*y since it is cheap to check.
                V_MtV( &s_bfgs_X(), Q_X, BLAS_Cpp::trans, S.col(k) );  // s_bfgs_X = Q_X'*s_bfgs
                V_MtV( &y_bfgs_X(), Q_X, BLAS_Cpp::trans, Y.col(k) );  // y_bfgs_X = Q_X'*y_bfgs
                sRTyR = dot( s_bfgs_R, y_bfgs_R );
                sXTyX = dot( s_bfgs_X, y_bfgs_X );
                bool
                  sXTyX_cond    = ::fabs(sXTyX/sRTyR) <= project_error_tol,
                  do_update     = sXTyX_cond,
                  sXTBXXsX_cond = false;
                if( sXTyX_cond ) {
                  // Check the second, more expensive condition
                  V_MtV( &s_bfgs_R(), Q_R, BLAS_Cpp::trans, S.col(k) );  // s_bfgs_R = Q_R'*s_bfgs
                  V_MtV( &y_bfgs_R(), Q_R, BLAS_Cpp::trans, Y.col(k) );  // y_bfgs_R = Q_R'*y_bfgs
                  sRTBRRsR = transVtMtV( s_bfgs_R, rHL_RR_op, BLAS_Cpp::no_trans, s_bfgs_R );
                  sXTBXXsX = rHL_scale * dot( s_bfgs_X, s_bfgs_X );
                  sXTBXXsX_cond = sXTBXXsX/sRTBRRsR <= project_error_tol;
                  do_update = sXTBXXsX_cond && sXTyX_cond;
                }
                if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
                  out << "\n---------------------------------------------------------------------"
                    << "\nprevious update " << l
                    << "\n\nChecking projection error:\n"
                    << "\n|s_X'*y_X|/|s_R'*y_R| = |" << sXTyX << "|/|" << sRTyR << "| = "
                    << ::fabs(sXTyX/sRTyR)
                    << ( sXTyX_cond ? " <= " : " > " )
                    << " project_error_tol = " << project_error_tol;
                  if( sXTyX_cond ) {
                    out << "\n(s_X'*rHL_XX*s_X/s_R'*rHL_RR*s_R) = (" << sXTBXXsX << "/" << sRTBRRsR << ") = "
                      << (sXTBXXsX/sRTBRRsR)
                      << ( sXTBXXsX_cond ? " <= " : " > " )
                      << " project_error_tol = " << project_error_tol;
                  }
                  out << ( do_update
                     ? "\n\nAttempting to add this previous update where B = rHL_RR ...\n"
                     : "\n\nCan not add this previous update ...\n" );
                }
                if( do_update ) {
                  // ( rHL_RR, s_bfgs_R, y_bfgs_R ) -> rHL_RR  (this should not throw an exception!)
                  try {
                    proj_bfgs_updater().bfgs_update().perform_update(
                      &s_bfgs_R(),&y_bfgs_R(),false,out,olevel,algo->algo_cntr().check_results()
                      ,&rHL_RR_op, &quasi_newton_stats );
                  }
                  catch( const MatrixSymSecant::UpdateSkippedException& excpt ) {
                    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
                      out << "\nOops!  The "
The " << l << "th most recent BFGS update was rejected?:\n" << excpt.what() << std::endl; } } } } if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) { out << "\n---------------------------------------------------------------------\n"; } if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) { out << "\nrHL_RR after adding previous BFGS updates:\nrHL_BRR =\n" << dynamic_cast<MatrixOp&>(*rHL_RR); } } else { if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) { out << "\nThere were no previous update vectors to add!\n"; } } if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) { out << "\nFull rHL after complete reinitialization:\nrHL =\n" << *rHL_k; } quasi_newton_stats_(*s).set_k(0).set_updated_stats( QuasiNewtonStats::REINITIALIZED ); return true; } else { if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) { out << "\nn_pz_R == n_pz = " << n_pz_R << ", No variables would be removed from " << "the superbasis so just keep on performing limited memory BFGS for now ...\n"; } do_projected_rHL_RR = false; } } } } // If we have not switched to PBFGS then just update the full limited memory BFGS matrix if(!do_projected_rHL_RR) { if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) { out << "\nPerform current BFGS update on " << n_pz << " x " << n_pz << " full " << "limited memory reduced Hessian B = rHL ...\n"; } proj_bfgs_updater().bfgs_update().perform_update( s_bfgs,y_bfgs,first_update,out,olevel,algo->algo_cntr().check_results() ,lbfgs_rHL_RR.get() ,&quasi_newton_stats_(*s).set_k(0) ); return true; } } else { if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) { out << "\nWe have already switched to projected BFGS updating ...\n"; } } // // If we get here then we must have switched to // projected updating so lets just pass it on! // return proj_bfgs_updater().perform_update( s_bfgs,y_bfgs,first_update,out,olevel,algo,s,rHL_k); }