Code example #1
bool TangentialStepIP_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
  {
  using BLAS_Cpp::no_trans;
  using Teuchos::dyn_cast;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::Vt_S;
  using LinAlgOpPack::Vp_StV;
  using LinAlgOpPack::V_StV;
  using LinAlgOpPack::V_MtV;
  using LinAlgOpPack::V_InvMtV;
   using LinAlgOpPack::M_StM;
  using LinAlgOpPack::Mp_StM;
  using LinAlgOpPack::assign;

  NLPAlgo	&algo	= rsqp_algo(_algo);
  IpState	    &s      = dyn_cast<IpState>(_algo.state());

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  // Compute qp_grad, an approximation to rGf + Z'*(mu*(invXu*e-invXl*e)) plus the cross term;
  // minimize round-off error by computing Z'*(Gf + mu*(invXu*e-invXl*e))

  // qp_grad_k = Z'*(Gf + mu*(invXu*e-invXl*e))
  const MatrixSymDiagStd  &invXu = s.invXu().get_k(0);
  const MatrixSymDiagStd  &invXl = s.invXl().get_k(0);
  const value_type            &mu    = s.barrier_parameter().get_k(0);
  const MatrixOp          &Z_k   = s.Z().get_k(0);

  Teuchos::RCP<VectorMutable> rhs = s.Gf().get_k(0).clone();
  Vp_StV( rhs.get(), mu,      invXu.diag() );
  Vp_StV( rhs.get(), -1.0*mu, invXl.diag() );
  
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) 
    {
    out << "\n||Gf_k + mu_k*(invXu_k-invXl_k)||inf = " << rhs->norm_inf() << std::endl;
    }
  if( (int)olevel >= (int)PRINT_VECTORS)
    {
    out << "\nGf_k + mu_k*(invXu_k-invXl_k) =\n" << *rhs;
    }

  VectorMutable &qp_grad_k = s.qp_grad().set_k(0);
  V_MtV(&qp_grad_k, Z_k, BLAS_Cpp::trans, *rhs);
  
  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) 
    {
    out << "\n||Z_k'*(Gf_k + mu_k*(invXu_k-invXl_k))||inf = " << qp_grad_k.norm_inf() << std::endl;
    }
  if( (int)olevel >= (int)PRINT_VECTORS )
    {
    out << "\nZ_k'*(Gf_k + mu_k*(invXu_k-invXl_k)) =\n" << qp_grad_k;
    }

  // error check for cross term
  value_type         &zeta    = s.zeta().set_k(0);
  const Vector &w_sigma = s.w_sigma().get_k(0);
  
  // need code to calculate damping parameter
  zeta = 1.0;

  Vp_StV(&qp_grad_k, zeta, w_sigma);

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS ) 
    {
    out << "\n||qp_grad_k||inf = " << qp_grad_k.norm_inf() << std::endl;
    }
  if( (int)olevel >= (int)PRINT_VECTORS ) 
    {
    out << "\nqp_grad_k =\n" << qp_grad_k;
    }

  // build the "Hessian" term B = rHL + rHB
  // should this be MatrixSymOpNonsing
  const MatrixSymOp      &rHL_k = s.rHL().get_k(0);
  const MatrixSymOp      &rHB_k = s.rHB().get_k(0);
  MatrixSymOpNonsing &B_k   = dyn_cast<MatrixSymOpNonsing>(s.B().set_k(0));
  if (B_k.cols() != Z_k.cols())
    {
    // Initialize space in rHB
    dyn_cast<MatrixSymInitDiag>(B_k).init_identity(Z_k.space_rows(), 0.0);
    }

  //	M_StM(&B_k, 1.0, rHL_k, no_trans);
  assign(&B_k, rHL_k, BLAS_Cpp::no_trans);
  if( (int)olevel >= (int)PRINT_VECTORS ) 
    {
    out << "\nB_k = rHL_k =\n" << B_k;
    }
  Mp_StM(&B_k, 1.0, rHB_k, BLAS_Cpp::no_trans);
  if( (int)olevel >= (int)PRINT_VECTORS ) 
    {
    out << "\nB_k = rHL_k + rHB_k =\n" << B_k;
    }

  // Solve the system pz = - inv(B) * qp_grad
  VectorMutable   &pz_k  = s.pz().set_k(0);
  V_InvMtV( &pz_k, B_k, no_trans, qp_grad_k );
  Vt_S( &pz_k, -1.0 );

  // Zpz = Z * pz
  V_MtV( &s.Zpz().set_k(0), s.Z().get_k(0), no_trans, pz_k );

  if( (int)olevel >= (int)PRINT_ALGORITHM_STEPS )
    {
    out	<< "\n||pz||inf   = " << s.pz().get_k(0).norm_inf()
      << "\nsum(Zpz)    = " << AbstractLinAlgPack::sum(s.Zpz().get_k(0))  << std::endl;
    }

  if( (int)olevel >= (int)PRINT_VECTORS )
    {
    out << "\npz_k = \n" << s.pz().get_k(0);
    out << "\nnu_k = \n" << s.nu().get_k(0);
    out << "\nZpz_k = \n" << s.Zpz().get_k(0);
    out << std::endl;
    }

  if(algo.algo_cntr().check_results())
    {
    assert_print_nan_inf(s.pz().get_k(0),  "pz_k",true,&out);
    assert_print_nan_inf(s.Zpz().get_k(0), "Zpz_k",true,&out);
    }

  return true;
  }
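
The essential arithmetic of the step above can be written out with plain vectors. Below is a minimal, self-contained sketch (not the matrix and vector classes used above; the diagonal B and the small dense Z are illustrative assumptions) of qp_grad = Z'*(Gf + mu*(1/(xu-x) - 1/(x-xl))) + zeta*w, pz = -inv(B)*qp_grad, and Zpz = Z*pz, where the invXu and invXl diagonals are written out element-wise as in code example #5 below.

// Illustrative sketch of the tangential-step arithmetic above (not the library API).
#include <cassert>
#include <cstddef>
#include <vector>

typedef std::vector<double> Vec;

int main()
{
  const std::size_t n = 3, nz = 2;          // n variables, nz null-space coordinates
  const double mu = 0.1, zeta = 1.0;

  Vec x(n), xl(n, 0.0), xu(n, 10.0), Gf(n), w(nz, 0.0);
  x[0]=1.0; x[1]=2.0; x[2]=3.0;
  Gf[0]=0.5; Gf[1]=-0.25; Gf[2]=1.0;

  // Z is n x nz (columns span the null space); stored column-major here.
  Vec Z(n*nz, 0.0);
  Z[0*n+0]=1.0; Z[0*n+2]=-1.0;              // column 1
  Z[1*n+1]=1.0;                             // column 2

  // rhs = Gf + mu*( 1/(xu-x) - 1/(x-xl) )  (the barrier gradient term)
  Vec rhs(n);
  for(std::size_t i=0;i<n;++i)
    rhs[i] = Gf[i] + mu*( 1.0/(xu[i]-x[i]) - 1.0/(x[i]-xl[i]) );

  // qp_grad = Z'*rhs + zeta*w
  Vec qp_grad(nz, 0.0);
  for(std::size_t j=0;j<nz;++j) {
    for(std::size_t i=0;i<n;++i) qp_grad[j] += Z[j*n+i]*rhs[i];
    qp_grad[j] += zeta*w[j];
  }

  // B = rHL + rHB taken as a positive diagonal here, so inv(B) is trivial.
  Vec B_diag(nz, 2.0);

  // pz = -inv(B)*qp_grad,  Zpz = Z*pz
  Vec pz(nz), Zpz(n, 0.0);
  for(std::size_t j=0;j<nz;++j) pz[j] = -qp_grad[j]/B_diag[j];
  for(std::size_t j=0;j<nz;++j)
    for(std::size_t i=0;i<n;++i) Zpz[i] += Z[j*n+i]*pz[j];

  assert(pz.size()==nz && Zpz.size()==n);
  return 0;
}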
Code example #2
bool LineSearchFullStep_Step::do_step(Algorithm& _algo
  , poss_type step_poss, IterationPack::EDoStepType type, poss_type assoc_step_poss)
{
  using AbstractLinAlgPack::Vp_StV;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::V_VpV;

  NLPAlgo        &algo   = rsqp_algo(_algo);
  NLPAlgoState   &s      = algo.rsqp_state();
  NLP            &nlp    = algo.nlp();

  const size_type
    m = nlp.m();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }
  
  // alpha_k = 1.0
  IterQuantityAccess<value_type>
    &alpha_iq   = s.alpha();
  if( !alpha_iq.updated_k(0) )
    alpha_iq.set_k(0) = 1.0;

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out	<< "\nf_k        = " << s.f().get_k(0);
    if(m)
      out << "\n||c_k||inf = " << s.c().get_k(0).norm_inf();
    out << "\nalpha_k    = " << alpha_iq.get_k(0) << std::endl;
  }

  // x_kp1 = x_k + d_k
  IterQuantityAccess<VectorMutable>  &x_iq = s.x();
  const Vector                       &x_k   = x_iq.get_k(0);
  VectorMutable                      &x_kp1 = x_iq.set_k(+1);
  x_kp1 = x_k;
  Vp_StV( &x_kp1, alpha_iq.get_k(0), s.d().get_k(0) );

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out	<< "\n||x_kp1||inf   = " << s.x().get_k(+1).norm_inf() << std::endl;
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nx_kp1 =\n" << s.x().get_k(+1);
  }

  if(algo.algo_cntr().check_results()) {
    assert_print_nan_inf(
      x_kp1, "x_kp1",true
      ,int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
      );
    if( nlp.num_bounded_x() ) {
      if(!bounds_tester().check_in_bounds(
          int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
        , int(olevel) >= int(PRINT_VECTORS)					// print_all_warnings
        , int(olevel) >= int(PRINT_ITERATION_QUANTITIES)	// print_vectors
        , nlp.xl(), "xl"
        , nlp.xu(), "xu"
        , x_kp1, "x_kp1"
        ))
      {
        TEST_FOR_EXCEPTION(
          true, TestFailed
          ,"LineSearchFullStep_Step::do_step(...) : Error, "
          "the variables bounds xl <= x_k(+1) <= xu where violated!" );
      }
    }
  }

  // Calculate f and c at the new point.
  nlp.unset_quantities();
  nlp.set_f( &s.f().set_k(+1) );
  if(m) nlp.set_c( &s.c().set_k(+1) );
  nlp.calc_f(x_kp1);
  if(m) nlp.calc_c( x_kp1, false );
  nlp.unset_quantities();

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out	<< "\nf_kp1        = " << s.f().get_k(+1);
    if(m)
      out << "\n||c_kp1||inf = " << s.c().get_k(+1).norm_inf();
    out << std::endl;
  }

  if( m && static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nc_kp1 =\n" << s.c().get_k(+1); 
  }

  if(algo.algo_cntr().check_results()) {
    assert_print_nan_inf( s.f().get_k(+1), "f(x_kp1)", true, &out );
    if(m)
      assert_print_nan_inf( s.c().get_k(+1), "c(x_kp1)", true, &out );
  }

  return true;
}
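
Stripped of journaling, the step above is x_kp1 = x_k + alpha_k*d_k with alpha_k = 1, followed by NaN/Inf and bound checks. A minimal sketch on plain vectors (the inline bound check is an illustrative stand-in for the bounds_tester used above, not the same test):

// Illustrative sketch of the full-step update above (not the library API).
#include <cmath>
#include <stdexcept>
#include <vector>

typedef std::vector<double> Vec;

// x_kp1 = x_k + alpha*d_k, then verify finiteness and xl <= x_kp1 <= xu.
Vec full_step(const Vec& x_k, const Vec& d_k, double alpha,
              const Vec& xl, const Vec& xu)
{
  Vec x_kp1(x_k);
  for(std::size_t i=0;i<x_kp1.size();++i) {
    x_kp1[i] += alpha*d_k[i];
    if(!std::isfinite(x_kp1[i]))
      throw std::runtime_error("x_kp1 contains NaN or Inf!");
    if(x_kp1[i] < xl[i] || x_kp1[i] > xu[i])
      throw std::runtime_error("the variable bounds xl <= x_kp1 <= xu were violated!");
  }
  return x_kp1;
}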
Code example #3
bool NLPDirectTester::finite_diff_check(
  NLPDirect         *nlp
  ,const Vector     &xo
  ,const Vector     *xl
  ,const Vector     *xu
  ,const Vector     *c
  ,const Vector     *Gf
  ,const Vector     *py
  ,const Vector     *rGf
  ,const MatrixOp   *GcU
  ,const MatrixOp   *D
  ,const MatrixOp   *Uz
  ,bool             print_all_warnings
  ,std::ostream     *out
  ) const
{

  using std::setw;
  using std::endl;
  using std::right;

  using AbstractLinAlgPack::sum;
  using AbstractLinAlgPack::dot;
  using AbstractLinAlgPack::Vp_StV;
  using AbstractLinAlgPack::random_vector;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using LinAlgOpPack::V_StV;
  using LinAlgOpPack::V_StMtV;
  using LinAlgOpPack::Vp_MtV;
  using LinAlgOpPack::M_StM;
  using LinAlgOpPack::M_StMtM;

  typedef VectorSpace::vec_mut_ptr_t  vec_mut_ptr_t;

//  using AbstractLinAlgPack::TestingPack::CompareDenseVectors;
//  using AbstractLinAlgPack::TestingPack::CompareDenseSparseMatrices;

  using TestingHelperPack::update_success;

  bool success = true, preformed_fd = true; // initialize so the check after FD_NOT_PREFORMED is well defined
  if(out) {
    *out << std::boolalpha
       << std::endl
       << "*********************************************************\n"
       << "*** NLPDirectTester::finite_diff_check(...) ***\n"
       << "*********************************************************\n";
  }

  const Range1D
    var_dep      = nlp->var_dep(),
    var_indep    = nlp->var_indep(),
    con_decomp   = nlp->con_decomp(),
    con_undecomp = nlp->con_undecomp();
  NLP::vec_space_ptr_t
    space_x = nlp->space_x(),
    space_c = nlp->space_c();

  // //////////////////////////////////////////////
  // Validate the input

  TEST_FOR_EXCEPTION(
    py && !c, std::invalid_argument
    ,"NLPDirectTester::finite_diff_check(...) : "
    "Error, if py!=NULL then c!=NULL must also be true!" );

  const CalcFiniteDiffProd
    &fd_deriv_prod = this->calc_fd_prod();

  const value_type
    rand_y_l = -1.0, rand_y_u = 1.0,
    small_num = ::sqrt(std::numeric_limits<value_type>::epsilon());

  try {

  // ///////////////////////////////////////////////
  // (1) Check Gf

  if(Gf) {
    switch( Gf_testing_method() ) {
      case FD_COMPUTE_ALL: {
        // Compute FDGf outright
        TEST_FOR_EXCEPT(true); // ToDo: update above!
        break;
      }
      case FD_DIRECTIONAL: {
        // Compute FDGF'*y using random y's
        if(out)
          *out
            << "\nComparing products Gf'*y with finite difference values FDGf'*y for "
            << "random y's ...\n";
        vec_mut_ptr_t y = space_x->create_member();
        value_type max_warning_viol = 0.0;
        int num_warning_viol = 0;
        const int num_fd_directions_used = ( num_fd_directions() > 0 ? num_fd_directions() : 1 );
        for( int direc_i = 1; direc_i <= num_fd_directions_used; ++direc_i ) {
          if( num_fd_directions() > 0 ) {
            random_vector( rand_y_l, rand_y_u, y.get() );
            if(out)
              *out
                << "\n****"
                << "\n**** Random directional vector " << direc_i << " ( ||y||_1 / n = "
                << (y->norm_1() / y->dim()) << " )"
                << "\n***\n";
          }
          else {
            *y = 1.0;
            if(out)
              *out
                << "\n****"
                << "\n**** Ones vector y ( ||y||_1 / n = "<<(y->norm_1()/y->dim())<<" )"
                << "\n***\n";
          }
          value_type
            Gf_y = dot( *Gf, *y ),
            FDGf_y;
          preformed_fd = fd_deriv_prod.calc_deriv_product(
            xo,xl,xu
            ,*y,NULL,NULL,true,nlp,&FDGf_y,NULL,out,dump_all(),dump_all()
            );
          if( !preformed_fd )
            goto FD_NOT_PREFORMED;
          assert_print_nan_inf(FDGf_y, "FDGf'*y",true,out);
          const value_type
            calc_err = ::fabs( ( Gf_y - FDGf_y )/( ::fabs(Gf_y) + ::fabs(FDGf_y) + small_num ) );
          if( calc_err >= Gf_warning_tol() ) {
            max_warning_viol = my_max( max_warning_viol, calc_err );
            ++num_warning_viol;
          }
          if(out)
            *out
              << "\nrel_err(Gf'*y,FDGf'*y) = "
              << "rel_err(" << Gf_y << "," << FDGf_y << ") = "
              << calc_err << endl;
          if( calc_err >= Gf_error_tol() ) {
            if(out) {
              *out
                << "Error, above relative error exceeded Gf_error_tol = " << Gf_error_tol() << endl;
              if(dump_all()) {
                *out << "\ny =\n" << *y;
              }
            }
          }
        }
        if(out && num_warning_viol)
          *out
            << "\nThere were " << num_warning_viol << " warning tolerance "
            << "violations out of num_fd_directions = " << num_fd_directions()
            << " computations of FDGf'*y\n"
            << "and the maximum violation was " << max_warning_viol
 Gf_waring_tol = "">
            << " > Gf_warning_tol = " << Gf_warning_tol() << endl;
        break;
      }
      default:
        TEST_FOR_EXCEPT(true); // Invalid value
    }
  }

  // /////////////////////////////////////////////
  // (2) Check py = -inv(C)*c
  //
  // We want to check:
  // 
  //  FDC * (inv(C)*c) \approx c
  //       \_________/
  //         -py
  //
  // We can compute this as:
  //           
  // FDC * py = [ FDC, FDN ] * [ -py ; 0 ]
  //            \__________/
  //                FDA'
  // 
  // t1 =  [ -py ; 0 ]
  // 
  // t2 = FDA'*t1
  // 
  // Compare t2 \approx c
  // 
  if(py) {
    if(out)
      *out
        << "\nComparing c with finite difference product FDA'*[ -py; 0 ] = -FDC*py ...\n";
    // t1 =  [ -py ; 0 ]
    VectorSpace::vec_mut_ptr_t
      t1 = space_x->create_member();
    V_StV( t1->sub_view(var_dep).get(), -1.0, *py );
    *t1->sub_view(var_indep) = 0.0;
    // t2 = FDA'*t1
    VectorSpace::vec_mut_ptr_t
      t2 = nlp->space_c()->create_member();
    preformed_fd = fd_deriv_prod.calc_deriv_product(
      xo,xl,xu
      ,*t1,NULL,NULL,true,nlp,NULL,t2.get(),out,dump_all(),dump_all()
      );
    if( !preformed_fd )
      goto FD_NOT_PREFORMED;
    const value_type
      sum_c  = sum(*c),
      sum_t2 = sum(*t2);
    assert_print_nan_inf(sum_t2, "sum(-FDC*py)",true,out);
    const value_type
      calc_err = ::fabs( ( sum_c - sum_t2 )/( ::fabs(sum_c) + ::fabs(sum_t2) + small_num ) );
    if(out)
      *out
        << "\nrel_err(sum(c),sum(-FDC*py)) = "
        << "rel_err(" << sum_c << "," << sum_t2 << ") = "
        << calc_err << endl;
    if( calc_err >= Gc_error_tol() ) {
      if(out)
        *out
          << "Error, above relative error exceeded Gc_error_tol = " << Gc_error_tol() << endl;
      if(print_all_warnings)
        *out << "\nt1 = [ -py; 0 ] =\n" << *t1
           << "\nt2 = FDA'*t1 = -FDC*py =\n"   << *t2;
      update_success( false, &success );
    }
    if( calc_err >= Gc_warning_tol() ) {
      if(out)
        *out
          << "\nWarning, above relative error exceeded Gc_warning_tol = " << Gc_warning_tol() << endl;
    }
  }

  // /////////////////////////////////////////////
  // (3) Check D = -inv(C)*N

  if(D) {
    switch( Gc_testing_method() ) {
      case FD_COMPUTE_ALL: {
        //
        // Compute FDN outright and check
        // -FDC * D \approx FDN
        // 
        // We want to compute:
        // 
        // FDC * -D = [ FDC, FDN ] * [ -D; 0 ]
        //            \__________/
        //                FDA'
        // 
        // To compute the above we perform:
        // 
        // T = FDA' * [ -D; 0 ] (one column at a time)
        // 
        // Compare T \approx FDN
        //
/*
        // FDN
        DMatrix FDN(m,n-m);
        fd_deriv_computer.calc_deriv( xo, xl, xu
          , Range1D(m+1,n), nlp, NULL
          , &FDN() ,BLAS_Cpp::trans, out );

        // T = FDA' * [ -D; 0 ] (one column at a time)
        DMatrix T(m,n-m);
        DVector t(n);
        t(m+1,n) = 0.0;
        for( int s = 1; s <= n-m; ++s ) {
          // t = [ -D(:,s); 0 ]
          V_StV( &t(1,m), -1.0, D->col(s) );
          // T(:,s) =  FDA' * t
          fd_deriv_prod.calc_deriv_product(
            xo,xl,xu,t(),NULL,NULL,nlp,NULL,&T.col(s),out);
        }        

        // Compare T \approx FDN
        if(out)
          *out
            << "\nChecking the computed D = -inv(C)*N\n"
            << "where D(i,j) = (-FDC*D)(i,j), dM(i,j) = FDN(i,j) ...\n";
        result = comp_M.comp(
          T(), FDN, BLAS_Cpp::no_trans
          , CompareDenseSparseMatrices::FULL_MATRIX
          , CompareDenseSparseMatrices::REL_ERR_BY_COL
          , Gc_warning_tol(), Gc_error_tol()
          , print_all_warnings, out );
        update_success( result, &success );
        if(!result) return false;
*/
        TEST_FOR_EXCEPT(true); // Todo: Implement above!
        break;
      }
      case FD_DIRECTIONAL: {
        //
        // Compute -FDC * D * v \approx FDN * v
        // for random v's
        //
        // We will compute this as:
        // 
        // t1 = [ 0; y ] <: R^(n)
        // 
        // t2 = FDA' * t1  (  FDN * y ) <: R^(m)
        //
        // t1 = [ -D * y ; 0 ]  <: R^(n)
        // 
        // t3 = FDA' * t1  ( -FDC * D * y ) <: R^(m)
        // 
        // Compare t2 \approx t3
        //
        if(out)
          *out
            << "\nComparing finite difference products -FDC*D*y with FDN*y for "
              "random vectors y ...\n";
        VectorSpace::vec_mut_ptr_t
          y  = space_x->sub_space(var_indep)->create_member(),
          t1 = space_x->create_member(),
          t2 = space_c->create_member(),
          t3 = space_c->create_member();
        value_type max_warning_viol = 0.0;
        int num_warning_viol = 0;
        const int num_fd_directions_used = ( num_fd_directions() > 0 ? num_fd_directions() : 1 );
        for( int direc_i = 1; direc_i <= num_fd_directions_used; ++direc_i ) {
          if( num_fd_directions() > 0 ) {
            random_vector( rand_y_l, rand_y_u, y.get() );
            if(out)
              *out
                << "\n****"
                << "\n**** Random directional vector " << direc_i << " ( ||y||_1 / n = "
                << (y->norm_1() / y->dim()) << " )"
                << "\n***\n";
          }
          else {
            *y = 1.0;
            if(out)
              *out
                << "\n****"
                << "\n**** Ones vector y ( ||y||_1 / n = "<<(y->norm_1()/y->dim())<<" )"
                << "\n***\n";
          }
          // t1 = [ 0; y ] <: R^(n)
          *t1->sub_view(var_dep)   = 0.0;
          *t1->sub_view(var_indep) = *y;
          // t2 = FDA' * t1  (  FDN * y ) <: R^(m)
          preformed_fd = fd_deriv_prod.calc_deriv_product(
            xo,xl,xu
            ,*t1,NULL,NULL,true,nlp,NULL,t2.get(),out,dump_all(),dump_all()
            );
          if( !preformed_fd )
            goto FD_NOT_PREFORMED;
          // t1 = [ -D * y ; 0 ]  <: R^(n)
          V_StMtV( t1->sub_view(var_dep).get(), -1.0, *D, BLAS_Cpp::no_trans, *y );
          *t1->sub_view(var_indep) = 0.0;
          // t3 = FDA' * t1  ( -FDC * D * y ) <: R^(m)
          preformed_fd = fd_deriv_prod.calc_deriv_product(
            xo,xl,xu
            ,*t1,NULL,NULL,true,nlp,NULL,t3.get(),out,dump_all(),dump_all()
            );
          // Compare t2 \approx t3
          const value_type
            sum_t2 = sum(*t2),
            sum_t3 = sum(*t3);
          const value_type
            calc_err = ::fabs( ( sum_t2 - sum_t3 )/( ::fabs(sum_t2) + ::fabs(sum_t3) + small_num ) );
          if(out)
            *out
              << "\nrel_err(sum(-FDC*D*y),sum(FDN*y)) = "
              << "rel_err(" << sum_t3 << "," << sum_t2 << ") = "
              << calc_err << endl;
          if( calc_err >= Gc_warning_tol() ) {
            max_warning_viol = my_max( max_warning_viol, calc_err );
            ++num_warning_viol;
          }
          if( calc_err >= Gc_error_tol() ) {
            if(out)
              *out
                << "Error, above relative error exceeded Gc_error_tol = " << Gc_error_tol() << endl
                << "Stoping the tests!\n";
            if(print_all_warnings)
              *out << "\ny =\n" << *y
                   << "\nt1 = [ -D*y; 0 ] =\n" << *t1
                   << "\nt2 =  FDA' * [ 0; y ] = FDN * y =\n" << *t2
                   << "\nt3 =  FDA' * t1 = -FDC * D * y =\n" << *t3;
            update_success( false, &success );
          }
        }
        if(out && num_warning_viol)
          *out
            << "\nThere were " << num_warning_viol << " warning tolerance "
            << "violations out of num_fd_directions = " << num_fd_directions()
            << " computations of sum(FDC*D*y) and sum(FDN*y)\n"
            << "and the maximum relative iolation was " << max_warning_viol
            << " > Gc_waring_tol = " << Gc_warning_tol() << endl;
        break;
      }
      default:
        TEST_FOR_EXCEPT(true);
    }
  }

  // ///////////////////////////////////////////////
  // (4) Check rGf = Gf(var_indep) + D'*Gf(var_dep)

  if(rGf) {
    if( Gf && D ) {
      if(out)
        *out
          << "\nComparing rGf_tmp = Gf(var_indep) - D'*Gf(var_dep) with rGf ...\n";
      VectorSpace::vec_mut_ptr_t
        rGf_tmp = space_x->sub_space(var_indep)->create_member();
      *rGf_tmp = *Gf->sub_view(var_indep);
      Vp_MtV( rGf_tmp.get(), *D, BLAS_Cpp::trans, *Gf->sub_view(var_dep) );
      const value_type
        sum_rGf_tmp  = sum(*rGf_tmp),
        sum_rGf      = sum(*rGf);
      const value_type
        calc_err = ::fabs( ( sum_rGf_tmp - sum_rGf )/( ::fabs(sum_rGf_tmp) + ::fabs(sum_rGf) + small_num ) );
      if(out)
        *out
          << "\nrel_err(sum(rGf_tmp),sum(rGf)) = "
          << "rel_err(" << sum_rGf_tmp << "," << sum_rGf << ") = "
          << calc_err << endl;
      if( calc_err >= Gc_error_tol() ) {
        if(out)
          *out
            << "Error, above relative error exceeded Gc_error_tol = " << Gc_error_tol() << endl;
        if(print_all_warnings)
          *out << "\nrGf_tmp =\n" << *rGf_tmp
             << "\nrGf =\n"   << *rGf;
        update_success( false, &success );
      }
      if( calc_err >= Gc_warning_tol() ) {
        if(out)
          *out
            << "\nWarning, above relative error exceeded Gc_warning_tol = "
            << Gc_warning_tol() << endl;
      }
    }
    else if( D ) {
      if(out)
        *out
          << "\nComparing rGf'*y with the finite difference product"
          << " fd_prod(f,[D*y;y]) for random vectors y ...\n";
      VectorSpace::vec_mut_ptr_t
        y  = space_x->sub_space(var_indep)->create_member(),
        t  = space_x->create_member();
      value_type max_warning_viol = 0.0;
      int num_warning_viol = 0;
      const int num_fd_directions_used = ( num_fd_directions() > 0 ? num_fd_directions() : 1 );
      for( int direc_i = 1; direc_i <= num_fd_directions_used; ++direc_i ) {
        if( num_fd_directions() > 0 ) {
          random_vector( rand_y_l, rand_y_u, y.get() );
          if(out)
            *out
              << "\n****"
              << "\n**** Random directional vector " << direc_i << " ( ||y||_1 / n = "
              << (y->norm_1() / y->dim()) << " )"
              << "\n***\n";
        }
        else {
          *y = 1.0;
          if(out)
            *out
              << "\n****"
              << "\n**** Ones vector y ( ||y||_1 / n = "<<(y->norm_1()/y->dim())<<" )"
              << "\n***\n";
        }
        // t = [ D*y; y ]
        LinAlgOpPack::V_MtV(&*t->sub_view(var_dep),*D,BLAS_Cpp::no_trans,*y);
        *t->sub_view(var_indep) = *y;
        value_type fd_rGf_y = 0.0;
        // fd_Gf_y
        preformed_fd = fd_deriv_prod.calc_deriv_product(
          xo,xl,xu
          ,*t,NULL,NULL,true,nlp,&fd_rGf_y,NULL,out,dump_all(),dump_all()
          );
        if( !preformed_fd )
          goto FD_NOT_PREFORMED;
        if(out) *out << "fd_prod(f,[D*y;y]) = " << fd_rGf_y << "\n";
        // rGf_y = rGf'*y
        const value_type rGf_y = dot(*rGf,*y);
        if(out) *out << "rGf'*y = " << rGf_y << "\n";
        // Compare fd_rGf_y and rGf*y
        const value_type
          calc_err = ::fabs( ( rGf_y - fd_rGf_y )/( ::fabs(rGf_y) + ::fabs(fd_rGf_y) + small_num ) );
        if( calc_err >= Gc_warning_tol() ) {
          max_warning_viol = my_max( max_warning_viol, calc_err );
          ++num_warning_viol;
        }
        if(out)
          *out
            << "\nrel_err(rGf'*y,fd_prod(f,[D*y;y])) = "
            << "rel_err(" << fd_rGf_y << "," << rGf_y << ") = "
            << calc_err << endl;
        if( calc_err >= Gf_error_tol() ) {
          if(out)
            *out << "Error, above relative error exceeded Gc_error_tol = " << Gc_error_tol() << endl;
          if(print_all_warnings)
            *out << "\ny =\n" << *y
                 << "\nt = [ D*y; y ] =\n" << *t;
          update_success( false, &success );
        }
      }
    }
    else {
      TEST_FOR_EXCEPT(true); // ToDo: Test rGf without D? (This is not going to be easy!)
    }
  }
  
  // ///////////////////////////////////////////////////
  // (5) Check GcU, and/or Uz (for undecomposed equalities)

  if(GcU || Uz) {
    TEST_FOR_EXCEPT(true); // ToDo: Implement!
  }
  
FD_NOT_PREFORMED:

  if(!preformed_fd) {
    if(out)
      *out
        << "\nError, the finite difference computation was not preformed due to cramped bounds\n"
        << "Finite difference test failed!\n" << endl;
    return false;
  }

  } // end try
  catch( const AbstractLinAlgPack::NaNInfException& except ) {
    if(out)
      *out
        << "Error, found a NaN or Inf.  Stoping tests\n";
    success = false;
  }
  
  if( out ) {
    if( success )
      *out
        << "\nCongradulations, all the finite difference errors where within the\n"
        "specified error tolerances!\n";
    else
      *out
        << "\nOh no, at least one of the above finite difference tests failed!\n";
  }

  return success;

}
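
The FD_DIRECTIONAL branches above all reduce to the same test: compare an analytic directional derivative against a finite-difference estimate along random directions y and form the relative error |a - b|/(|a| + |b| + small_num). A self-contained sketch using a central difference on an illustrative quadratic (the test function and the step size h are assumptions, not what CalcFiniteDiffProd does internally):

// Illustrative directional finite-difference check (not the tester API above).
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

typedef std::vector<double> Vec;

static double f(const Vec& x) {                 // illustrative objective
  double s = 0.0;
  for(std::size_t i=0;i<x.size();++i) s += 0.5*x[i]*x[i];
  return s;
}
static void grad(const Vec& x, Vec* g) {        // its analytic gradient
  for(std::size_t i=0;i<x.size();++i) (*g)[i] = x[i];
}

int main()
{
  const std::size_t n = 5;
  const double h = 1e-5, small_num = std::sqrt(1e-16);
  Vec x(n, 1.0), g(n), y(n), xp(n), xm(n);
  grad(x, &g);
  for(std::size_t i=0;i<n;++i) y[i] = 2.0*std::rand()/RAND_MAX - 1.0;  // random direction
  double Gf_y = 0.0;
  for(std::size_t i=0;i<n;++i) Gf_y += g[i]*y[i];             // analytic Gf'*y
  for(std::size_t i=0;i<n;++i) { xp[i]=x[i]+h*y[i]; xm[i]=x[i]-h*y[i]; }
  const double FDGf_y = (f(xp) - f(xm))/(2.0*h);              // central difference
  const double rel_err =
    std::fabs(Gf_y - FDGf_y)/(std::fabs(Gf_y)+std::fabs(FDGf_y)+small_num);
  std::printf("rel_err(Gf'*y,FDGf'*y) = %g\n", rel_err);
  return rel_err < 1e-6 ? 0 : 1;
}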
Code example #4
bool CheckConvergenceStd_Strategy::Converged(
  Algorithm& _algo
  )
  {
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::combined_nu_comp_err;
  
  NLPAlgo        &algo = rsqp_algo(_algo);
  NLPAlgoState   &s    = algo.rsqp_state();
  NLP            &nlp  = algo.nlp();

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();

  const size_type
    n  = nlp.n(),
    m  = nlp.m(),
    nb = nlp.num_bounded_x();

  // Get the iteration quantities
  IterQuantityAccess<value_type>
    &opt_kkt_err_iq  = s.opt_kkt_err(),
    &feas_kkt_err_iq = s.feas_kkt_err(),
      &comp_kkt_err_iq = s.comp_kkt_err();
  
  IterQuantityAccess<VectorMutable>
    &x_iq       = s.x(),
    &d_iq       = s.d(),
    &Gf_iq      = s.Gf(),
    *c_iq       = m     ? &s.c()      : NULL,
    *rGL_iq     = n > m ? &s.rGL()    : NULL,
    *GL_iq      = n > m ? &s.GL()     : NULL,
    *nu_iq      = n > m ? &s.nu()     : NULL;

  // opt_err = (||rGL_k||inf or ||GL_k||inf) / (1 + ||Gf_k||inf)
  value_type 
    norm_inf_Gf_k = 0.0,
    norm_inf_GLrGL_k = 0.0;

  if( n > m && scale_opt_error_by_Gf() && Gf_iq.updated_k(0) ) {
    assert_print_nan_inf(
      norm_inf_Gf_k = Gf_iq.get_k(0).norm_inf(),
      "||Gf_k||inf",true,&out
      );
  }

  // NOTE:
  // The strategy object CheckConvergenceIP_Strategy assumes
  // that this will always be the gradient of the lagrangian
  // of the original problem, not the gradient of the lagrangian
  // for psi. (don't use augmented nlp info here)
  if( n > m ) {
    if( opt_error_check() == OPT_ERROR_REDUCED_GRADIENT_LAGR ) {
      assert_print_nan_inf( norm_inf_GLrGL_k = rGL_iq->get_k(0).norm_inf(),
                  "||rGL_k||inf",true,&out);
    }
    else {
      assert_print_nan_inf( norm_inf_GLrGL_k = GL_iq->get_k(0).norm_inf(),
                  "||GL_k||inf",true,&out);
    }
  }

  const value_type
    opt_scale_factor = 1.0 + norm_inf_Gf_k,
    opt_err = norm_inf_GLrGL_k / opt_scale_factor;
  
  // feas_err
  const value_type feas_err = ( ( m ? c_iq->get_k(0).norm_inf() : 0.0 ) );

  // comp_err
  value_type comp_err = 0.0;
  if ( n > m ) {
    if (nb > 0) {
      comp_err = combined_nu_comp_err(nu_iq->get_k(0), x_iq.get_k(0), nlp.xl(), nlp.xu());
    }
    if(m) {
      assert_print_nan_inf( feas_err,"||c_k||inf",true,&out);
    }
  }

  // scaling factors
  const value_type 
    scale_opt_factor = CalculateScalingFactor(s, scale_opt_error_by()),
    scale_feas_factor = CalculateScalingFactor(s, scale_feas_error_by()),
    scale_comp_factor = CalculateScalingFactor(s, scale_comp_error_by());
  
  // kkt_err
  const value_type
    opt_kkt_err_k  = opt_err/scale_opt_factor,
     feas_kkt_err_k = feas_err/scale_feas_factor,
    comp_kkt_err_k = comp_err/scale_comp_factor;

  // update the iteration quantities
  if(n > m) opt_kkt_err_iq.set_k(0) = opt_kkt_err_k;
  feas_kkt_err_iq.set_k(0) = feas_kkt_err_k;
  comp_kkt_err_iq.set_k(0) = comp_kkt_err_k;

  // step_err
  value_type step_err = 0.0;
  if( d_iq.updated_k(0) ) {
    step_err = AbstractLinAlgPack::max_rel_step(x_iq.get_k(0),d_iq.get_k(0));
    assert_print_nan_inf( step_err,"max(d(i)/max(1,x(i)),i=1...n)",true,&out);
  }

  const value_type
    opt_tol		= algo.algo_cntr().opt_tol(),
    feas_tol	= algo.algo_cntr().feas_tol(),
    comp_tol	= algo.algo_cntr().comp_tol(),
    step_tol	= algo.algo_cntr().step_tol();
  
  const bool found_solution = 
    opt_kkt_err_k < opt_tol 
    && feas_kkt_err_k < feas_tol 
    && comp_kkt_err_k < comp_tol 
    && step_err < step_tol;
  
  if( int(olevel) >= int(PRINT_ALGORITHM_STEPS) || (int(olevel) >= int(PRINT_BASIC_ALGORITHM_INFO) && found_solution) )
  {
    out	
      << "\nscale_opt_factor = " << scale_opt_factor
      << " (scale_opt_error_by = " << (scale_opt_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_opt_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_feas_factor = " << scale_feas_factor
      << " (scale_feas_error_by = " << (scale_feas_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_feas_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"

      << "\nscale_comp_factor = " << scale_comp_factor
      << " (scale_comp_error_by = " << (scale_comp_error_by()==SCALE_BY_ONE ? "SCALE_BY_ONE"
                       : (scale_comp_error_by()==SCALE_BY_NORM_INF_X ? "SCALE_BY_NORM_INF_X"
                        : "SCALE_BY_NORM_2_X" ) ) << ")"
      << "\nopt_scale_factor = " << opt_scale_factor
      << " (scale_opt_error_by_Gf = " << (scale_opt_error_by_Gf()?"true":"false") << ")"
      << "\nopt_kkt_err_k    = " << opt_kkt_err_k << ( opt_kkt_err_k < opt_tol ? " < " : " > " )
      << "opt_tol  = " << opt_tol
      << "\nfeas_kkt_err_k   = " << feas_kkt_err_k << ( feas_kkt_err_k < feas_tol ? " < " : " > " )
      << "feas_tol = " << feas_tol
      << "\ncomp_kkt_err_k   = " << comp_kkt_err_k << ( comp_kkt_err_k < comp_tol ? " < " : " > " )
      << "comp_tol = " << comp_tol
      << "\nstep_err         = " << step_err << ( step_err < step_tol ? " < " : " > " )
      << "step_tol = " << step_tol
      << std::endl;
    }
  
  return found_solution;

  }
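
Ignoring journal output, and with the three scaling factors fixed at one, the test above reduces to four scaled error measures compared against their tolerances. A condensed sketch:

// Condensed sketch of the convergence test above (all scale factors taken as 1).
struct KKTErrors {
  double opt_err;    // (||rGL||inf or ||GL||inf) / (1 + ||Gf||inf)
  double feas_err;   // ||c||inf
  double comp_err;   // combined complementarity error for the bound multipliers
  double step_err;   // max( |d(i)| / max(1,|x(i)|) )
};

inline bool converged(const KKTErrors& e,
                      double opt_tol, double feas_tol,
                      double comp_tol, double step_tol)
{
  return e.opt_err  < opt_tol
      && e.feas_err < feas_tol
      && e.comp_err < comp_tol
      && e.step_err < step_tol;
}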
Code example #5
bool PostEvalNewPointBarrier_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
  {
  using Teuchos::dyn_cast;
  using IterationPack::print_algorithm_step;
  using AbstractLinAlgPack::inv_of_difference;
  using AbstractLinAlgPack::correct_upper_bound_multipliers;
  using AbstractLinAlgPack::correct_lower_bound_multipliers;
  using LinAlgOpPack::Vp_StV;

  NLPAlgo            &algo   = dyn_cast<NLPAlgo>(_algo);
  IpState             &s      = dyn_cast<IpState>(_algo.state());
  NLP                 &nlp    = algo.nlp();
  
  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();
  
  if(!nlp.is_initialized())
    nlp.initialize(algo.algo_cntr().check_results());

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) 
    {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( _algo, step_poss, type, assoc_step_poss, out );
    }

  IterQuantityAccess<VectorMutable> &x_iq = s.x();
  IterQuantityAccess<MatrixSymDiagStd> &Vl_iq = s.Vl();
  IterQuantityAccess<MatrixSymDiagStd> &Vu_iq = s.Vu();

  ///***********************************************************
  // Calculate invXl = diag(1/(x-xl)) 
  //  and invXu = diag(1/(xu-x)) matrices
  ///***********************************************************

  // get references to x, invXl, and invXu
  VectorMutable& x = x_iq.get_k(0);
  MatrixSymDiagStd& invXu = s.invXu().set_k(0);
  MatrixSymDiagStd& invXl = s.invXl().set_k(0);
  
  //std::cout << "xu=\n";
  //nlp.xu().output(std::cout);

  inv_of_difference(1.0, nlp.xu(), x, &invXu.diag());
  inv_of_difference(1.0, x, nlp.xl(), &invXl.diag());

  //std::cout << "invXu=\v";
  //invXu.output(std::cout);

  //std::cout << "\ninvXl=\v";
  //invXl.output(std::cout);
  
  // Check for divide by zero
  using AbstractLinAlgPack::assert_print_nan_inf;
  assert_print_nan_inf(invXu.diag(), "invXu", true, &out);
  assert_print_nan_inf(invXl.diag(), "invXl", true, &out);
  // These should never go negative either - could be a useful check

  // Initialize Vu and Vl if necessary
  if ( /*!Vu_iq.updated_k(0) */ Vu_iq.last_updated() == IterQuantity::NONE_UPDATED )
    {
    VectorMutable& vu = Vu_iq.set_k(0).diag();		
    vu = 0;
    Vp_StV(&vu, s.barrier_parameter().get_k(-1), invXu.diag());

    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) 
      {
      out << "\nInitialize Vu with barrier_parameter * invXu ...\n";
      }
    }

  if ( /*!Vl_iq.updated_k(0) */ Vl_iq.last_updated() == IterQuantity::NONE_UPDATED  )
    {
    VectorMutable& vl = Vl_iq.set_k(0).diag();
    vl = 0;
    Vp_StV(&vl, s.barrier_parameter().get_k(-1), invXl.diag());

    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) 
      {
      out << "\nInitialize Vl with barrier_parameter * invXl ...\n";
      }
    }

  if (s.num_basis().updated_k(0))
    {
    // Basis changed, reorder Vl and Vu
    if (Vu_iq.updated_k(-1))
      { Vu_iq.set_k(0,-1); }
    if (Vl_iq.updated_k(-1))
      { Vl_iq.set_k(0,-1); }
      
    VectorMutable& vu = Vu_iq.set_k(0).diag();
    VectorMutable& vl = Vl_iq.set_k(0).diag();

    s.P_var_last().permute( BLAS_Cpp::trans, &vu ); // Permute back to original order
    s.P_var_last().permute( BLAS_Cpp::trans, &vl ); // Permute back to original order

    if( (int)olevel >= (int)PRINT_VECTORS ) 
      {
      out << "\nvl and vu resorted to the original order; x =\n" << x;
      }

    s.P_var_current().permute( BLAS_Cpp::no_trans, &vu ); // Permute to new (current) order
    s.P_var_current().permute( BLAS_Cpp::no_trans, &vl ); // Permute to new (current) order

    if( (int)olevel >= (int)PRINT_VECTORS ) 
      {
      out << "\nvl and vu resorted to the new basis; x =\n" << x;
      }
    }

  correct_upper_bound_multipliers(nlp.xu(), +NLP::infinite_bound(), &Vu_iq.get_k(0).diag());
  correct_lower_bound_multipliers(nlp.xl(), -NLP::infinite_bound(), &Vl_iq.get_k(0).diag());
  
  if( (int)olevel >= (int)PRINT_VECTORS ) 
    {
    out << "x=\n"  << s.x().get_k(0);
    out << "xl=\n" << nlp.xl();
    out << "vl=\n" << s.Vl().get_k(0).diag();
    out << "xu=\n" << nlp.xu();
    out << "vu=\n" << s.Vu().get_k(0).diag();
    }

  // Update general algorithm bound multipliers
  VectorMutable& v = s.nu().set_k(0);
  v = Vu_iq.get_k(0).diag();
  Vp_StV(&v,-1.0,Vl_iq.get_k(0).diag());

  // Print vector information
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) 
    {
    out	<< "invXu_k.diag()=\n" << invXu.diag();
    out	<< "invXl_k.diag()=\n" << invXl.diag();
    out	<< "Vu_k.diag()=\n"    << Vu_iq.get_k(0).diag();
    out	<< "Vl_k.diag()=\n"    << Vl_iq.get_k(0).diag();
    out << "nu_k=\n"           << s.nu().get_k(0);
    }

  return true;
  }
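
The matrix updates above are purely element-wise on the diagonals. A minimal sketch of invXl = diag(1/(x - xl)), invXu = diag(1/(xu - x)), the initialization Vl = mu*invXl and Vu = mu*invXu, and nu = Vu.diag() - Vl.diag(), on plain vectors (all bounds are assumed finite here, so the correct_*_bound_multipliers fix-ups are omitted):

// Element-wise sketch of the barrier diagonals and bound multipliers above.
#include <cstddef>
#include <vector>
typedef std::vector<double> Vec;

void post_eval_barrier(const Vec& x, const Vec& xl, const Vec& xu, double mu,
                       Vec* invXl, Vec* invXu, Vec* vl, Vec* vu, Vec* nu)
{
  const std::size_t n = x.size();
  invXl->resize(n); invXu->resize(n);
  vl->resize(n); vu->resize(n); nu->resize(n);
  for(std::size_t i=0;i<n;++i) {
    (*invXl)[i] = 1.0/(x[i]  - xl[i]);     // diag(1/(x-xl))
    (*invXu)[i] = 1.0/(xu[i] - x[i]);      // diag(1/(xu-x))
    (*vl)[i]    = mu*(*invXl)[i];          // Vl = barrier_parameter * invXl
    (*vu)[i]    = mu*(*invXu)[i];          // Vu = barrier_parameter * invXu
    (*nu)[i]    = (*vu)[i] - (*vl)[i];     // nu = Vu.diag() - Vl.diag()
  }
}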
Code example #6
bool EvalNewPointStd_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
{
  using Teuchos::rcp;
  using Teuchos::dyn_cast;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using IterationPack::print_algorithm_step;
  using NLPInterfacePack::NLPFirstOrder;

  NLPAlgo         &algo   = rsqp_algo(_algo);
  NLPAlgoState    &s      = algo.rsqp_state();
  NLPFirstOrder   &nlp    = dyn_cast<NLPFirstOrder>(algo.nlp());

  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  EJournalOutputLevel ns_olevel = algo.algo_cntr().null_space_journal_output_level();
  std::ostream& out = algo.track().journal_out();

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( algo, step_poss, type, assoc_step_poss, out );
  }

  if(!nlp.is_initialized())
    nlp.initialize(algo.algo_cntr().check_results());

  Teuchos::VerboseObjectTempState<NLP>
    nlpOutputTempState(rcp(&nlp,false),Teuchos::getFancyOStream(rcp(&out,false)),convertToVerbLevel(olevel));

  const size_type
    n  = nlp.n(),
    nb = nlp.num_bounded_x(),
    m  = nlp.m();
  size_type
    r  = 0;

  // Get the iteration quantity container objects
  IterQuantityAccess<value_type>
    &f_iq   = s.f();
  IterQuantityAccess<VectorMutable>
    &x_iq   = s.x(),
    *c_iq   = m > 0  ? &s.c() : NULL,
    &Gf_iq  = s.Gf();
  IterQuantityAccess<MatrixOp>
    *Gc_iq  = m  > 0 ? &s.Gc() : NULL,
    *Z_iq   = NULL,
    *Y_iq   = NULL,
    *Uz_iq  = NULL,
    *Uy_iq  = NULL;
  IterQuantityAccess<MatrixOpNonsing>
    *R_iq   = NULL;

  MatrixOp::EMatNormType mat_nrm_inf = MatrixOp::MAT_NORM_INF;
  const bool calc_matrix_norms = algo.algo_cntr().calc_matrix_norms();
  const bool calc_matrix_info_null_space_only = algo.algo_cntr().calc_matrix_info_null_space_only();
  
  if( x_iq.last_updated() == IterQuantity::NONE_UPDATED ) {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
      out << "\nx is not updated for any k so set x_k = nlp.xinit() ...\n";
    }
    x_iq.set_k(0) = nlp.xinit();
  }
  
  // Validate x
  if( nb && algo.algo_cntr().check_results() ) {
    assert_print_nan_inf(
      x_iq.get_k(0), "x_k", true
      , int(olevel) >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
      );
    if( nlp.num_bounded_x() > 0 ) {
      if(!bounds_tester().check_in_bounds(
           int(olevel)  >= int(PRINT_ALGORITHM_STEPS) ? &out : NULL
           ,int(olevel) >= int(PRINT_VECTORS)                // print_all_warnings
           ,int(olevel) >= int(PRINT_ITERATION_QUANTITIES)  // print_vectors
           ,nlp.xl(),        "xl"
           ,nlp.xu(),        "xu"
           ,x_iq.get_k(0),   "x_k"
           ))
      {
        TEUCHOS_TEST_FOR_EXCEPTION(
          true, TestFailed
          ,"EvalNewPointStd_Step::do_step(...) : Error, "
          "the variables bounds xl <= x_k <= xu where violated!" );
      }
    }
  }

  Vector &x = x_iq.get_k(0);

  Range1D  var_dep(Range1D::INVALID), var_indep(Range1D::INVALID);
  if( s.get_decomp_sys().get() ) {
    const ConstrainedOptPack::DecompositionSystemVarReduct
      *decomp_sys_vr = dynamic_cast<ConstrainedOptPack::DecompositionSystemVarReduct*>(&s.decomp_sys());
    if(decomp_sys_vr) {
      var_dep   = decomp_sys_vr->var_dep();
      var_indep = decomp_sys_vr->var_indep();
    }
    s.var_dep(var_dep);
    s.var_indep(var_indep);
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\n||x_k||inf            = " << x.norm_inf();
    if( var_dep.size() )
      out << "\n||x(var_dep)_k||inf   = " << x.sub_view(var_dep)->norm_inf();
    if( var_indep.size() )
      out << "\n||x(var_indep)_k||inf = " << x.sub_view(var_indep)->norm_inf();
    out << std::endl;
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out << "\nx_k = \n" << x;
    if( var_dep.size() )
      out << "\nx(var_dep)_k = \n" << *x.sub_view(var_dep);
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if( var_indep.size() )
      out << "\nx(var_indep)_k = \n" << *x.sub_view(var_indep);
  }

  // Set the references to the current point's quantities to be updated
  const bool f_k_updated  = f_iq.updated_k(0);
  const bool Gf_k_updated = Gf_iq.updated_k(0);
  const bool c_k_updated  = m  > 0 ? c_iq->updated_k(0)  : false;
  const bool Gc_k_updated = m  > 0 ? Gc_iq->updated_k(0) : false;
  nlp.unset_quantities();
  if(!f_k_updated) nlp.set_f( &f_iq.set_k(0) );
  if(!Gf_k_updated) nlp.set_Gf( &Gf_iq.set_k(0) );
  if( m > 0 ) {
    if(!c_k_updated) nlp.set_c( &c_iq->set_k(0) );
    if(!Gc_k_updated) nlp.set_Gc( &Gc_iq->set_k(0) );
  }

  // Calculate Gc at x_k
  bool new_point = true;
  if(m > 0) {
    if(!Gc_k_updated) nlp.calc_Gc( x, new_point );
    new_point = false;
  }

  //
  // Update (or select a new) range/null decomposition
  //
  bool new_decomp_selected = false;
  if( m > 0 ) {
    
    // Update the range/null decomposition
    decomp_sys_handler().update_decomposition(
      algo, s, nlp, decomp_sys_testing(), decomp_sys_testing_print_level()
      ,&new_decomp_selected
      );

    r  = s.equ_decomp().size();

    Z_iq   = ( n > m && r > 0 )      ? &s.Z()  : NULL;
    Y_iq   = ( r > 0 )               ? &s.Y()  : NULL;
    Uz_iq  = ( m  > 0 && m  > r )    ? &s.Uz() : NULL;
    Uy_iq  = ( m  > 0 && m  > r )    ? &s.Uy() : NULL;
    R_iq   = ( m > 0 )               ? &s.R()  : NULL;

    // Determine if we will test the decomp_sys or not
    const DecompositionSystem::ERunTests
      ds_test_what = ( ( decomp_sys_testing() == DecompositionSystemHandler_Strategy::DST_TEST
                 || ( decomp_sys_testing() == DecompositionSystemHandler_Strategy::DST_DEFAULT
                  && algo.algo_cntr().check_results() ) )
               ? DecompositionSystem::RUN_TESTS
               : DecompositionSystem::NO_TESTS );
    
    // Determine the output level for decomp_sys				
    DecompositionSystem::EOutputLevel ds_olevel;
    switch(olevel) {
      case PRINT_NOTHING:
      case PRINT_BASIC_ALGORITHM_INFO:
        ds_olevel = DecompositionSystem::PRINT_NONE;
        break;
      case PRINT_ALGORITHM_STEPS:
      case PRINT_ACTIVE_SET:
        ds_olevel = DecompositionSystem::PRINT_BASIC_INFO;
        break;
      case PRINT_VECTORS:
        ds_olevel = DecompositionSystem::PRINT_VECTORS;
        break;
      case PRINT_ITERATION_QUANTITIES:
        ds_olevel = DecompositionSystem::PRINT_EVERY_THING;
        break;
      default:
        TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
    };

    // Test the decomposition system
    if( ds_test_what == DecompositionSystem::RUN_TESTS ) {
      // Set the output level
      if( decomp_sys_tester().print_tests() == DecompositionSystemTester::PRINT_NOT_SELECTED ) {
        DecompositionSystemTester::EPrintTestLevel  ds_olevel;
        switch(olevel) {
          case PRINT_NOTHING:
          case PRINT_BASIC_ALGORITHM_INFO:
            ds_olevel = DecompositionSystemTester::PRINT_NONE;
            break;
          case PRINT_ALGORITHM_STEPS:
          case PRINT_ACTIVE_SET:
            ds_olevel = DecompositionSystemTester::PRINT_BASIC;
            break;
          case PRINT_VECTORS:
            ds_olevel = DecompositionSystemTester::PRINT_MORE;
            break;
          case PRINT_ITERATION_QUANTITIES:
            ds_olevel = DecompositionSystemTester::PRINT_ALL;
            break;
          default:
            TEUCHOS_TEST_FOR_EXCEPT(true); // Should not get here!
        }
        decomp_sys_tester().print_tests(ds_olevel);
        decomp_sys_tester().dump_all( olevel == PRINT_ITERATION_QUANTITIES );
      }
      // Run the tests
      if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
        out << "\nTesting the range/null decompostion ...\n";
      }
      const bool
        decomp_sys_passed = decomp_sys_tester().test_decomp_system(
          s.decomp_sys()
          ,Gc_iq->get_k(0)                   // Gc
          ,Z_iq ? &Z_iq->get_k(0) : NULL     // Z
          ,&Y_iq->get_k(0)                   // Y
          ,&R_iq->get_k(0)                   // R
          ,m > r  ? &Uz_iq->get_k(0) : NULL  // Uz
          ,m > r  ? &Uy_iq->get_k(0) : NULL  // Uy
          ,( olevel >= PRINT_ALGORITHM_STEPS ) ? &out : NULL
          );
      TEUCHOS_TEST_FOR_EXCEPTION(
        !decomp_sys_passed, TestFailed
        ,"EvalNewPointStd_Step::do_step(...) : Error, "
        "the tests of the decomposition system failed!" );
    }
  }
  else {
    // Unconstrained problem
    Z_iq = &s.Z();
    dyn_cast<MatrixSymIdent>(Z_iq->set_k(0)).initialize( nlp.space_x() );
    s.equ_decomp(Range1D::Invalid);
    s.equ_undecomp(Range1D::Invalid);
  }

  // Calculate the rest of the quantities.  If decomp_sys is a variable
  // reduction decomposition system object, then nlp will be aware of the
  // basis selection and will permute these quantities to that basis.
  // Note that x will already be permuted to the current basis.
  if(!Gf_k_updated) { nlp.calc_Gf( x, new_point ); new_point = false; }
  if( m && (!c_k_updated || new_decomp_selected ) ) {
    if(c_k_updated) nlp.set_c( &c_iq->set_k(0) ); // This was not set earlier!
    nlp.calc_c( x, false);
  }
  if(!f_k_updated) {
    nlp.calc_f( x, false);
  }
  nlp.unset_quantities();
  
  // Check for NaN and Inf
  assert_print_nan_inf(f_iq.get_k(0),"f_k",true,&out); 
  if(m)
    assert_print_nan_inf(c_iq->get_k(0),"c_k",true,&out); 
  assert_print_nan_inf(Gf_iq.get_k(0),"Gf_k",true,&out); 
  
  // Print the iteration quantities before we test the derivatives for debugging

  // Update the selection of dependent and independent variables
  if( s.get_decomp_sys().get() ) {
    const ConstrainedOptPack::DecompositionSystemVarReduct
      *decomp_sys_vr = dynamic_cast<ConstrainedOptPack::DecompositionSystemVarReduct*>(&s.decomp_sys());
    if(decomp_sys_vr) {
      var_dep   = decomp_sys_vr->var_dep();
      var_indep = decomp_sys_vr->var_indep();
    }
  }
  
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out << "\nPrinting the updated iteration quantities ...\n";
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) {
    out	<< "\nf_k                      = "     << f_iq.get_k(0);
    out << "\n||Gf_k||inf              = "     << Gf_iq.get_k(0).norm_inf();
    if( var_dep.size() )
      out << "\n||Gf_k(var_dep)_k||inf   = " << Gf_iq.get_k(0).sub_view(var_dep)->norm_inf();
    if( var_indep.size() )
      out << "\n||Gf_k(var_indep)_k||inf = " << Gf_iq.get_k(0).sub_view(var_indep)->norm_inf();
    if(m) {
      out << "\n||c_k||inf               = " << c_iq->get_k(0).norm_inf();
      if( calc_matrix_norms && !calc_matrix_info_null_space_only )
        out << "\n||Gc_k||inf              = " << Gc_iq->get_k(0).calc_norm(mat_nrm_inf).value;
      if( n > r && calc_matrix_norms && !calc_matrix_info_null_space_only )
        out << "\n||Z||inf                 = " << Z_iq->get_k(0).calc_norm(mat_nrm_inf).value;
      if( r && calc_matrix_norms && !calc_matrix_info_null_space_only )
        out << "\n||Y||inf                 = " << Y_iq->get_k(0).calc_norm(mat_nrm_inf).value;
      if( r && calc_matrix_norms && !calc_matrix_info_null_space_only  )
        out << "\n||R||inf                 = " << R_iq->get_k(0).calc_norm(mat_nrm_inf).value;
      if( algo.algo_cntr().calc_conditioning() && !calc_matrix_info_null_space_only ) {
        out << "\ncond_inf(R)              = " << R_iq->get_k(0).calc_cond_num(mat_nrm_inf).value;
      }
      if( m > r && calc_matrix_norms && !calc_matrix_info_null_space_only ) {
        out << "\n||Uz_k||inf              = " << Uz_iq->get_k(0).calc_norm(mat_nrm_inf).value;
        out << "\n||Uy_k||inf              = " << Uy_iq->get_k(0).calc_norm(mat_nrm_inf).value;
      }
    }
    out << std::endl;
  }

  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ITERATION_QUANTITIES) ) {
    if(m)
      out << "\nGc_k =\n" << Gc_iq->get_k(0);
    if( n > r )
      out << "\nZ_k =\n" << Z_iq->get_k(0);
    if(r) {
      out << "\nY_k =\n" << Y_iq->get_k(0);
      out << "\nR_k =\n" << R_iq->get_k(0);
    }
    if( m > r ) {
      out << "\nUz_k =\n" << Uz_iq->get_k(0);
      out << "\nUy_k =\n" << Uy_iq->get_k(0);
    }
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    out	<< "\nGf_k =\n" << Gf_iq.get_k(0);
    if( var_dep.size() )
      out << "\nGf(var_dep)_k =\n " << *Gf_iq.get_k(0).sub_view(var_dep);
  }
  if( static_cast<int>(ns_olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if( var_indep.size() )
      out << "\nGf(var_indep)_k =\n" << *Gf_iq.get_k(0).sub_view(var_indep);
  }
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) {
    if(m)
      out	<< "\nc_k = \n" << c_iq->get_k(0);
  }
  
  // Check the derivatives if we are checking the results
  if(		fd_deriv_testing() == FD_TEST
    || ( fd_deriv_testing() == FD_DEFAULT && algo.algo_cntr().check_results() )  )
  {
    
    if( olevel >= PRINT_ALGORITHM_STEPS ) {
      out	<< "\n*** Checking derivatives by finite differences\n";
    }

    const bool
      nlp_passed = deriv_tester().finite_diff_check(
        &nlp
        ,x
        ,nb ? &nlp.xl() : NULL
        ,nb ? &nlp.xu() : NULL
        ,m  ? &Gc_iq->get_k(0) : NULL
        ,&Gf_iq.get_k(0)
        ,olevel >= PRINT_VECTORS
        ,( olevel >= PRINT_ALGORITHM_STEPS ) ? &out : NULL
        );
    TEUCHOS_TEST_FOR_EXCEPTION(
      !nlp_passed, TestFailed
      ,"EvalNewPointStd_Step::do_step(...) : Error, "
      "the tests of the nlp derivatives failed!" );
  }

  return true;
}
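
One calling pattern worth noting in the step above: storage for each iteration quantity is bound to the NLP with set_f/set_c/set_Gf/set_Gc, the calc_* methods are then called at the current x, and the storage is released again with unset_quantities(). The sketch below only shows that call order against a hypothetical minimal class; it is not the NLP interface used above.

// Schematic of the "bind storage, evaluate, unbind" calling pattern above.
// HypotheticalNlp is an assumption used only to illustrate the call order.
#include <cstddef>
#include <vector>
typedef std::vector<double> Vec;

class HypotheticalNlp {
  double* f_; Vec* c_; Vec* Gf_;
public:
  HypotheticalNlp() : f_(0), c_(0), Gf_(0) {}
  void set_f (double* f) { f_  = f;  }
  void set_c (Vec* c)    { c_  = c;  }
  void set_Gf(Vec* Gf)   { Gf_ = Gf; }
  void unset_quantities() { f_ = 0; c_ = 0; Gf_ = 0; }
  void calc_f (const Vec& x) { if(f_)  *f_ = 0.5*x[0]*x[0]; }     // toy objective
  void calc_c (const Vec& x) { if(c_)  (*c_)[0] = x[0] - 1.0; }   // toy constraint
  void calc_Gf(const Vec& x) { if(Gf_) (*Gf_)[0] = x[0]; }        // toy gradient
};

void evaluate_new_point(HypotheticalNlp& nlp, const Vec& x,
                        double* f_k, Vec* c_k, Vec* Gf_k)
{
  nlp.unset_quantities();        // start from a clean slate
  nlp.set_f(f_k);                // bind the iteration-quantity storage
  nlp.set_c(c_k);
  nlp.set_Gf(Gf_k);
  nlp.calc_Gf(x);                // evaluate into the bound storage
  nlp.calc_c(x);
  nlp.calc_f(x);
  nlp.unset_quantities();        // release the storage again
}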
Code example #7
bool PreEvalNewPointBarrier_Step::do_step(
  Algorithm& _algo, poss_type step_poss, IterationPack::EDoStepType type
  ,poss_type assoc_step_poss
  )
  {
  using Teuchos::dyn_cast;
  using IterationPack::print_algorithm_step;
  using AbstractLinAlgPack::force_in_bounds_buffer;

  NLPAlgo            &algo   = dyn_cast<NLPAlgo>(_algo);
  IpState             &s      = dyn_cast<IpState>(_algo.state());
  NLP                 &nlp    = algo.nlp();
  NLPFirstOrder   *nlp_foi = dynamic_cast<NLPFirstOrder*>(&nlp);
  
  EJournalOutputLevel olevel = algo.algo_cntr().journal_output_level();
  std::ostream& out = algo.track().journal_out();
  
  if(!nlp.is_initialized())
    nlp.initialize(algo.algo_cntr().check_results());

  // print step header.
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) 
    {
    using IterationPack::print_algorithm_step;
    print_algorithm_step( _algo, step_poss, type, assoc_step_poss, out );
    }

  IterQuantityAccess<value_type>     &barrier_parameter_iq = s.barrier_parameter();
  IterQuantityAccess<VectorMutable>  &x_iq  = s.x();

  if( x_iq.last_updated() == IterQuantity::NONE_UPDATED ) 
    {
    if( static_cast<int>(olevel) >= static_cast<int>(PRINT_ALGORITHM_STEPS) ) 
      {
      out << "\nInitialize x with x_k = nlp.xinit() ...\n"
        << " and push x_k within bounds.\n";
      }
    VectorMutable& x_k = x_iq.set_k(0) = nlp.xinit();
  
    // apply transformation operator to push x sufficiently within bounds
    force_in_bounds_buffer(relative_bound_push_, 
                 absolute_bound_push_,
                 nlp.xl(),
                 nlp.xu(),
                 &x_k);

    // evaluate the func and constraints
    IterQuantityAccess<value_type>
      &f_iq    = s.f();
    IterQuantityAccess<VectorMutable>
      &Gf_iq   = s.Gf(),
      *c_iq    = nlp.m() > 0 ? &s.c() : NULL;
    IterQuantityAccess<MatrixOp>
      *Gc_iq   = nlp_foi ? &s.Gc() : NULL;

    using AbstractLinAlgPack::assert_print_nan_inf;
    assert_print_nan_inf(x_k, "x", true, NULL); // With throw exception if Inf or NaN!

    // Wipe out storage for computed iteration quantities (just in case?) : RAB: 7/29/2002
    if(f_iq.updated_k(0))
      f_iq.set_not_updated_k(0);
    if(Gf_iq.updated_k(0))
      Gf_iq.set_not_updated_k(0);
    if (c_iq)
      {
      if(c_iq->updated_k(0))
        c_iq->set_not_updated_k(0);
      }
    if (nlp_foi)
      {
      if(Gc_iq->updated_k(0))
        Gc_iq->set_not_updated_k(0);
      }
    }

  if (barrier_parameter_iq.last_updated() == IterQuantity::NONE_UPDATED)
    {
    barrier_parameter_iq.set_k(-1) = 0.1; // RAB: 7/29/2002: This should be parameterized! (allow user to set this!)
    }

  // Print vector information
  if( static_cast<int>(olevel) >= static_cast<int>(PRINT_VECTORS) ) 
    {
    out << "x_k =\n" << x_iq.get_k(0);
    }

  return true;
  }
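
Before the barrier terms 1/(x - xl) and 1/(xu - x) can be formed, x_k has to sit strictly inside its bounds, which is what the force_in_bounds_buffer call above arranges. The sketch below is only one plausible element-wise reading of that idea; the buffer formula (max of an absolute push and a relative fraction of the bound width) is an assumption for illustration, not the rule the library uses.

// Illustrative clamp of x into a buffered interior of [xl, xu].
// NOTE: the buffer formula below is an assumption, not force_in_bounds_buffer's rule.
#include <algorithm>
#include <cstddef>
#include <vector>
typedef std::vector<double> Vec;

void push_in_bounds(double rel_push, double abs_push,
                    const Vec& xl, const Vec& xu, Vec* x)
{
  for(std::size_t i=0;i<x->size();++i) {
    const double width = xu[i] - xl[i];
    const double buf   = std::max(abs_push, rel_push*width);  // assumed buffer rule
    (*x)[i] = std::min(std::max((*x)[i], xl[i] + buf), xu[i] - buf);
  }
}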
Code example #8
bool MatrixOpNonsingTester::test_matrix(
  const MatrixOpNonsing   &M
  ,const char                     M_name[]
  ,std::ostream                   *out
  )
{
  namespace rcp = MemMngPack;
  using BLAS_Cpp::no_trans;
  using BLAS_Cpp::trans;
  using BLAS_Cpp::left;
  using BLAS_Cpp::right;
  using AbstractLinAlgPack::sum;
  using AbstractLinAlgPack::dot;
  using AbstractLinAlgPack::Vp_StV;
  using AbstractLinAlgPack::assert_print_nan_inf;
  using AbstractLinAlgPack::random_vector;
  using LinAlgOpPack::V_StMtV;
  using LinAlgOpPack::V_MtV;
  using LinAlgOpPack::V_StV;
  using LinAlgOpPack::V_VpV;
  using LinAlgOpPack::Vp_V;
  
  // ToDo: Check the preconditions
  
  bool success = true, result, lresult;
  const value_type
    rand_y_l  = -1.0,
    rand_y_u  = 1.0,
    small_num = ::pow(std::numeric_limits<value_type>::epsilon(),0.25),
    alpha     = 2.0;
  
  //
  // Perform the tests
  //

  if(out && print_tests() >= PRINT_BASIC)
    *out
      << "\nCheck: alpha*op(op(inv("<<M_name<<"))*op("<<M_name<<"))*v == alpha*v ...";
  if(out && print_tests() > PRINT_BASIC)
    *out << std::endl;

  VectorSpace::vec_mut_ptr_t
    v_c1 = M.space_cols().create_member(),
    v_c2 = M.space_cols().create_member(),
    v_r1 = M.space_rows().create_member(),
    v_r2 = M.space_rows().create_member();

  // Side of the matrix inverse	
  const BLAS_Cpp::Side    a_side[2]  = { BLAS_Cpp::left,     BLAS_Cpp::right };
  // If the matrices are transposed or not
  const BLAS_Cpp::Transp  a_trans[2] = { BLAS_Cpp::no_trans, BLAS_Cpp::trans };

  for( int side_i = 0; side_i < 2; ++side_i ) {
    for( int trans_i = 0; trans_i < 2; ++trans_i ) {
      const BLAS_Cpp::Side    t_side  = a_side[side_i];
      const BLAS_Cpp::Transp  t_trans = a_trans[trans_i];
      if(out && print_tests() >= PRINT_MORE)
        *out
          << "\n" << side_i+1 << "." << trans_i+1 << ") "
          << "Check: (t2 = "<<(t_side==left?"inv(":"alpha * ")<< M_name<<(t_trans==trans?"\'":"")<<(t_side==left?")":"")
          << " * (t1 = "<<(t_side==right?"inv(":"alpha * ")<<M_name<<(t_trans==trans?"\'":"")<<(t_side==right?")":"")
          << " * v)) == alpha * v ...";
      if(out && print_tests() > PRINT_MORE)
        *out << std::endl;
      result = true;
      VectorMutable
        *v  = NULL,
        *t1 = NULL,
        *t2 = NULL;
      if( (t_side == left && t_trans == no_trans) || (t_side == right && t_trans == trans) ) {
        // inv(M)*M*v or M'*inv(M')*v
        v  = v_r1.get();
        t1 = v_c1.get();
        t2 = v_r2.get();
      }
      else {
        // inv(M')*M'*v or M*inv(M)*v
        v  = v_c1.get();
        t1 = v_r1.get();
        t2 = v_c2.get();
      }
      for( int k = 1; k <= num_random_tests(); ++k ) {
        lresult = true;
        random_vector( rand_y_l, rand_y_u, v );
          if(out && print_tests() >= PRINT_ALL) {
          *out
            << "\n"<<side_i+1<<"."<<trans_i+1<<"."<<k<<") random vector " << k
            << " ( ||v||_1 / n = " << (v->norm_1() / v->dim()) << " )\n";
          if(dump_all() && print_tests() >= PRINT_ALL)
            *out << "\nv =\n" << *v;
        }
        // t1
        if( t_side == right ) {
          // t1 = inv(op(M))*v
          V_InvMtV( t1, M, t_trans, *v );
        }
        else {
          // t1 = alpha*op(M)*v
          V_StMtV( t1, alpha, M, t_trans, *v );
        }
        // t2
        if( t_side == left ) {
          // t2 = inv(op(M))*t1
          V_InvMtV( t2, M, t_trans, *t1 );
        }
        else {
          // t2 = alpha*op(M)*t1
          V_StMtV( t2, alpha, M, t_trans, *t1 );
        }
        const value_type
          sum_t2  = sum(*t2),
          sum_av  = alpha*sum(*v);
        assert_print_nan_inf(sum_t2, "sum(t2)",true,out);
        assert_print_nan_inf(sum_av, "sum(alpha*t1)",true,out);
        const value_type
          calc_err = ::fabs( ( sum_av - sum_t2 )
                     /( ::fabs(sum_av) + ::fabs(sum_t2) + small_num ) );
        if(out && print_tests() >= PRINT_ALL)
          *out
            << "\nrel_err(sum(alpha*v),sum(t2)) = "
            << "rel_err(" << sum_av << "," << sum_t2 << ") = "
            << calc_err << std::endl;
        if( calc_err >= warning_tol() ) {
          if(out && print_tests() >= PRINT_ALL)
            *out
              << std::endl
              << ( calc_err >= error_tol() ? "Error" : "Warning" )
              << ", rel_err(sum(alpha*v),sum(t2)) = "
              << "rel_err(" << sum_av << "," << sum_t2 << ") = "
              << calc_err
              << " exceeded "
              << ( calc_err >= error_tol() ? "error_tol" : "warning_tol" )
              << " = "
              << ( calc_err >= error_tol() ? error_tol() : warning_tol() )
              << std::endl;
          if(calc_err >= error_tol()) {
            if(dump_all() && print_tests() >= PRINT_ALL) {
              *out << "\nalpha = " << alpha << std::endl;
              *out << "\nv =\n"    << *v;
              *out << "\nt1 =\n"   << *t2;
              *out << "\nt2 =\n"   << *t2;
            }
            lresult = false;
          }
        }
        if(!lresult) result = false;
      }
      if(!result) success = false;
      if( out && print_tests() == PRINT_MORE )
        *out << " : " << ( result ? "passed" : "failed" )
           << std::endl;
    }
  }

  if( out && print_tests() == PRINT_BASIC )
    *out << " : " << ( success ? "passed" : "failed" );

  return success;
}
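
Each pass of the loops above checks the same identity: applying op(M) and inv(op(M)) in either order to a random v must reproduce alpha*v, and the comparison is made through sums with the relative error |a - b|/(|a| + |b| + small_num). A self-contained sketch of the side = left, no_trans case with a fixed 2x2 matrix (the concrete matrix and vector are illustrative assumptions):

// Illustrative check that inv(M)*(alpha*M*v) == alpha*v for a 2x2 example.
#include <cmath>
#include <cstdio>

int main()
{
  const double alpha = 2.0, small_num = std::pow(2.2e-16, 0.25);
  const double M[2][2]    = { { 4.0, 1.0 }, { 2.0, 3.0 } };
  const double det        = M[0][0]*M[1][1] - M[0][1]*M[1][0];
  const double Minv[2][2] = { {  M[1][1]/det, -M[0][1]/det },
                              { -M[1][0]/det,  M[0][0]/det } };
  const double v[2] = { 0.7, -0.3 };

  // t1 = alpha*M*v,  t2 = inv(M)*t1  (the side = left, no_trans case above)
  double t1[2], t2[2];
  for(int i=0;i<2;++i) t1[i] = alpha*(M[i][0]*v[0] + M[i][1]*v[1]);
  for(int i=0;i<2;++i) t2[i] = Minv[i][0]*t1[0] + Minv[i][1]*t1[1];

  const double sum_t2 = t2[0] + t2[1];
  const double sum_av = alpha*(v[0] + v[1]);
  const double rel_err =
    std::fabs(sum_av - sum_t2)/(std::fabs(sum_av) + std::fabs(sum_t2) + small_num);
  std::printf("rel_err(sum(alpha*v),sum(t2)) = %g\n", rel_err);
  return rel_err < 1e-10 ? 0 : 1;
}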