bool is_eq(const fitness_vector &f1, const fitness_vector &f2, double eps)
{
	if (f1.size() != f2.size()) return false;
	for (unsigned int i = 0; i < f1.size(); i++) {
		if (fabs(f1[i] - f2[i]) > eps) return false;
	}
	return true;
}
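// A minimal usage sketch for is_eq (illustrative only; assumes fitness_vector is pagmo's
// std::vector<double> typedef). It shows that the comparison is a plain component-wise
// absolute-difference test against eps.
inline bool is_eq_example()
{
	fitness_vector a(2), b(2);
	a[0] = 1.0;        a[1] = 2.0;
	b[0] = 1.0 + 1e-9; b[1] = 2.0;
	return is_eq(a, b, 1e-8) && !is_eq(a, b, 1e-10); // equal within 1e-8, not within 1e-10
}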
/**
 * Verifies whether the reference point and the hypervolume method meet certain criteria.
 *
 * @param[in] r_point fitness vector describing the reference point
 * @param[in] hv_algorithm hypervolume algorithm used for the computation
 *
 * @throws value_error if the reference point's and the point set's dimensions do not agree
 */
void hypervolume::verify_before_compute(const fitness_vector &r_point, hv_algorithm::base_ptr hv_algorithm) const
{
	if (m_points[0].size() != r_point.size()) {
		pagmo_throw(value_error, "Point set dimensions and reference point dimension must be equal.");
	}
	hv_algorithm->verify_before_compute(m_points, r_point);
}
/// Implementation of the objective function.
/// Adds noise to the computed fitness vector.
void noisy::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	// 1 - Initialize a temporary fitness vector storing one trial result,
	// and use it also to zero the return value.
	fitness_vector tmp(f.size(), 0.0);
	f = tmp;
	// 2 - Set the seed (depends on both the base seed and the chromosome).
	m_drng.seed(m_seed + m_decision_vector_hash(x));
	// 3 - Average over multiple trials.
	for (unsigned int j = 0; j < m_trials; ++j) {
		m_original_problem->objfun(tmp, x);
		inject_noise_f(tmp);
		for (fitness_vector::size_type i = 0; i < f.size(); ++i) {
			f[i] = f[i] + tmp[i] / (double)m_trials;
		}
	}
}
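// A standalone sketch of the same seeding-and-averaging scheme (names below are illustrative,
// not part of the pagmo API). Seeding the generator with seed + hash(x) makes repeated
// evaluations of the same chromosome reproduce the same noisy average, while different
// chromosomes draw different noise streams.
#include <cstddef>
#include <functional>
#include <random>
#include <vector>

double noisy_average_sketch(double (*objfun)(const std::vector<double> &),
	const std::vector<double> &x, unsigned seed, unsigned trials, double sigma)
{
	// combine the coordinates of x into a single hash (simple hash-combine, illustrative)
	std::size_t h = 0;
	for (std::size_t i = 0; i < x.size(); ++i) {
		h ^= std::hash<double>()(x[i]) + 0x9e3779b9 + (h << 6) + (h >> 2);
	}
	std::mt19937 rng(seed + static_cast<unsigned>(h));
	std::normal_distribution<double> noise(0.0, sigma);
	double avg = 0.0;
	for (unsigned j = 0; j < trials; ++j) {
		avg += (objfun(x) + noise(rng)) / static_cast<double>(trials);
	}
	return avg;
}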
/**
 * Verifies whether the given algorithm suits the requested data.
 *
 * @param[in] points vector of d-dimensional points for which the hypervolume is computed
 * @param[in] r_point reference point for the vector of points
 *
 * @throws value_error when trying to compute the hypervolume for a dimension other than 3, or for a non-maximal reference point
 */
void hv3d::verify_before_compute(const std::vector<fitness_vector> &points, const fitness_vector &r_point) const
{
	if (r_point.size() != 3) {
		pagmo_throw(value_error, "Algorithm hv3d works only for 3-dimensional cases");
	}
	base::assert_minimisation(points, r_point);
}
/**
 * Computes the original fitness of the multi-objective problem. It also updates the ideal point
 * in case m_adapt_ideal is true.
 *
 * @param[out] f non-decomposed fitness vector
 * @param[in] x chromosome
 */
void decompose::compute_original_fitness(fitness_vector &f, const decision_vector &x) const
{
	m_original_problem->objfun(f, x);
	if (m_adapt_ideal) {
		for (fitness_vector::size_type i = 0; i < f.size(); ++i) {
			if (f[i] < m_z[i]) m_z[i] = f[i];
		}
	}
}
/// Implementation of the objective function.
/// Adds noise to the decision vector before calling the actual objective function.
void robust::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	// Temporary storage used for averaging
	fitness_vector tmp(f.size(), 0.0);
	f = tmp;
	// Set the seed
	m_drng.seed(m_seed);
	// Perturb the decision vector and evaluate
	decision_vector x_perturbed(x);
	for (unsigned int i = 0; i < m_trials; ++i) {
		inject_noise_x(x_perturbed);
		m_original_problem->objfun(tmp, x_perturbed);
		for (fitness_vector::size_type j = 0; j < f.size(); ++j) {
			f[j] += tmp[j] / (double)m_trials;
		}
	}
}
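// Note: unlike the noisy meta-problem above, the seed here does not depend on x, so every
// chromosome is evaluated against the same sequence of perturbations, and it is the decision
// vector (not the fitness) that receives the noise.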
/// Implementation of the objective function.
void rastrigin::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	const double omega = 2.0 * boost::math::constants::pi<double>();
	f[0] = 0;
	const decision_vector::size_type n = x.size();
	for (decision_vector::size_type i = 0; i < n; ++i) {
		f[0] += x[i] * x[i] - 10.0 * std::cos(omega * x[i]);
	}
	f[0] += 10.0 * n;
}
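// The loop above implements the standard Rastrigin function
//   f(x) = 10 n + sum_i ( x_i^2 - 10 cos(2 pi x_i) ),
// whose global minimum f = 0 is at the origin. A self-contained re-statement on a plain
// std::vector (illustrative, outside the pagmo class hierarchy):
#include <cmath>
#include <cstddef>
#include <vector>

double rastrigin_value(const std::vector<double> &x)
{
	const double omega = 2.0 * std::acos(-1.0); // 2*pi
	double f = 10.0 * x.size();
	for (std::size_t i = 0; i < x.size(); ++i) {
		f += x[i] * x[i] - 10.0 * std::cos(omega * x[i]);
	}
	return f; // rastrigin_value(std::vector<double>(5, 0.0)) == 0.0
}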
/// Implementation of the objective function.
void michalewicz::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double retval = 0.0;
	for (decision_vector::size_type i = 0; i < n; i++) {
		retval -= sin(x[i]) * pow(sin((i + 1) * x[i] * x[i] / boost::math::constants::pi<double>()), 2 * m_m);
	}
	f[0] = retval;
}
/// Implementation of the objective function.
void schwefel::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	std::vector<double>::size_type n = x.size();
	double value = 0;
	for (std::vector<double>::size_type i = 0; i < n; i++) {
		value += x[i] * sin(sqrt(fabs(x[i])));
	}
	f[0] = 418.9828872724338 * n - value;
}
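// Note: the constant 418.9828872724338 * n offsets the sum so that the global minimum of the
// Schwefel function, located near x_i = 420.9687, evaluates to approximately zero.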
/// Implementation of the objective function.
void dejong::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double retval = 0.0;
	for (decision_vector::size_type i = 0; i < n; i++) {
		retval += x[i] * x[i];
	}
	f[0] = retval;
}
/// Applies noise to a fitness vector.
void noisy::inject_noise_f(fitness_vector &f) const
{
	for (f_size_type i = 0; i < f.size(); i++) {
		if (m_noise_type == NORMAL) {
			f[i] += m_normal_dist(m_drng) * m_param_second + m_param_first;
		} else if (m_noise_type == UNIFORM) {
			f[i] += m_uniform_dist(m_drng) * (m_param_second - m_param_first) + m_param_first;
		}
	}
}
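// For reference, the two noise models above map to (assuming m_normal_dist is a standard
// N(0,1) draw and m_uniform_dist a U(0,1) draw):
//   NORMAL : f_i += N(0,1) * sigma + mu    with mu = m_param_first, sigma = m_param_second
//   UNIFORM: f_i += U(0,1) * (b - a) + a   with a  = m_param_first, b     = m_param_second
// A minimal standalone sketch of the uniform case (illustrative names only):
#include <random>

double add_uniform_noise_sketch(double value, double a, double b, std::mt19937 &rng)
{
	std::uniform_real_distribution<double> u(0.0, 1.0);
	return value + u(rng) * (b - a) + a; // shifts value by a draw from [a, b)
}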
/**
 * Computes the decomposed fitness from the original multi-objective fitness and a weight vector.
 *
 * @param[out] f decomposed fitness vector
 * @param[in] original_fit original multi-objective fitness vector
 * @param[in] weights weight vector
 */
void decompose::compute_decomposed_fitness(fitness_vector &f, const fitness_vector &original_fit, const fitness_vector &weights) const
{
	if ((m_weights.size() != weights.size()) || (original_fit.size() != m_weights.size())) {
		pagmo_throw(value_error, "Check the sizes of the input weight and fitness vectors");
	}
	if (m_method == WEIGHTED) {
		f[0] = 0.0;
		for (base::f_size_type i = 0; i < m_original_problem->get_f_dimension(); ++i) {
			f[0] += weights[i] * original_fit[i];
		}
	} else if (m_method == TCHEBYCHEFF) {
		f[0] = 0.0;
		double tmp, weight;
		for (base::f_size_type i = 0; i < m_original_problem->get_f_dimension(); ++i) {
			weight = (weights[i] == 0) ? 1e-4 : weights[i]; // fixes the numerical problem of 0 weights
			tmp = weight * fabs(original_fit[i] - m_z[i]);
			if (tmp > f[0]) {
				f[0] = tmp;
			}
		}
	} else { // BI method
		const double THETA = 5.0;
		double d1 = 0.0;
		double weight_norm = 0.0;
		for (base::f_size_type i = 0; i < m_original_problem->get_f_dimension(); ++i) {
			d1 += (original_fit[i] - m_z[i]) * weights[i];
			weight_norm += pow(weights[i], 2);
		}
		weight_norm = sqrt(weight_norm);
		d1 = fabs(d1) / weight_norm;

		double d2 = 0.0;
		for (base::f_size_type i = 0; i < m_original_problem->get_f_dimension(); ++i) {
			d2 += pow(original_fit[i] - (m_z[i] + d1 * weights[i] / weight_norm), 2);
		}
		d2 = sqrt(d2);

		f[0] = d1 + THETA * d2;
	}
}
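// For reference, the three decompositions computed above, with z the (ideal) reference point
// and lambda the weight vector:
//   WEIGHTED   : f = sum_i lambda_i * f_i
//   TCHEBYCHEFF: f = max_i lambda_i * |f_i - z_i|            (zero weights replaced by 1e-4)
//   BI (PBI)   : f = d1 + THETA * d2, with d1 = |(F - z) . lambda| / ||lambda||,
//                d2 = || F - (z + d1 * lambda / ||lambda||) ||, and THETA = 5
// A minimal standalone Tchebycheff sketch on plain vectors (illustrative, not the class method):
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

double tchebycheff_sketch(const std::vector<double> &fit,
	const std::vector<double> &weights, const std::vector<double> &z)
{
	double best = 0.0;
	for (std::size_t i = 0; i < fit.size(); ++i) {
		const double w = (weights[i] == 0) ? 1e-4 : weights[i]; // same zero-weight guard as above
		best = std::max(best, w * std::fabs(fit[i] - z[i]));
	}
	return best; // e.g. fit = {2, 4}, weights = {0.5, 0.5}, z = {0, 0} gives 2
}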
hv_algorithm::base_ptr hypervolume::get_best_contributions(const fitness_vector &r_point) const
{
	switch (r_point.size()) {
		case 2:
			return hv_algorithm::base_ptr(new hv_algorithm::hv2d());
		case 3:
			return hv_algorithm::base_ptr(new hv_algorithm::hv3d());
		default:
			return hv_algorithm::base_ptr(new hv_algorithm::wfg());
	}
}
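// Note: the dimension of the reference point selects the algorithm used for the contribution
// computations: the dedicated 2-D and 3-D routines for those cases, and the WFG algorithm for
// any higher dimension.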
/// Implementation of the objective function.
void zdt2::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2);
	pagmo_assert(x.size() == 30);
	double g = 0;
	f[0] = x[0];
	// g sums the last 29 decision variables (x[1] .. x[29] in 0-based indexing).
	for (problem::base::size_type i = 1; i < 30; ++i) {
		g += x[i];
	}
	g = 1 + (9 * g) / 29;
	f[1] = g * (1 - (x[0] / g) * (x[0] / g));
}
/// Implementation of the objective function.
void zdt6::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2);
	pagmo_assert(x.size() == 10);
	double g = 0;
	f[0] = 1 - exp(-4 * x[0]) * pow(sin(6 * m_pi * x[0]), 6);
	// g sums the last 9 decision variables (x[1] .. x[9] in 0-based indexing).
	for (problem::base::size_type i = 1; i < 10; ++i) {
		g += x[i];
	}
	g = 1 + 9 * pow(g / 9, 0.25);
	f[1] = g * (1 - (f[0] / g) * (f[0] / g));
}
/// Implementation of the objective function.
void levy5::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double isum = 0.0;
	double jsum = 0.0;
	f[0] = 0;
	for (decision_vector::size_type j = 0; j < n; j += 2) {
		for (int i = 1; i <= 5; i++) {
			isum += (double)(i) * cos((double)(i - 1) * x[j] + (double)(i));
			jsum += (double)(i) * cos((double)(i + 1) * x[j + 1] + (double)(i));
		}
	}
	f[0] = isum * jsum;
	for (decision_vector::size_type j = 0; j < n; j += 2) {
		f[0] += pow(x[j] + 1.42513, 2) + pow(x[j + 1] + 0.80032, 2);
	}
}
/// Implementation of the objective function.
void sch::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2 && x.size() == 1);
	f[0] = x[0] * x[0];
	f[1] = (x[0] - 2) * (x[0] - 2);
}
/// Implementation of the objective functions.
/// (Wraps over the original implementation.)
void con2mo::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	constraint_vector c(m_original_problem->get_c_dimension(), 0.);
	m_original_problem->compute_constraints(c, x);

	decision_vector original_f(m_original_problem->get_f_dimension(), 0.);
	m_original_problem->objfun(original_f, x);

	f_size_type original_nbr_obj = original_f.size();
	c_size_type number_of_constraints = c.size();
	c_size_type number_of_eq_constraints = number_of_constraints - m_original_problem->get_ic_dimension();
	c_size_type number_of_violated_constraints = 0;

	// counts the number of violated constraints
	if (m_method == OBJ_CSTRS) {
		for (c_size_type i = 0; i < number_of_constraints; i++) {
			if (!m_original_problem->test_constraint(c, i))
				number_of_violated_constraints += 1;
		}
	}

	// modify equality constraints to behave as inequality constraints:
	const std::vector<double> &c_tol = m_original_problem->get_c_tol();
	for (c_size_type i = 0; i < number_of_constraints; i++) {
		if (i < number_of_eq_constraints) {
			c[i] = std::abs(c[i]) - c_tol.at(i);
		} else {
			c[i] = c[i] - c_tol.at(i);
		}
	}

	// clean the fitness vector
	for (f_size_type i = 0; i < f.size(); i++) {
		f[i] = 0.;
	}

	// in all cases, the first objectives hold the original objectives
	for (f_size_type i = 0; i < original_nbr_obj; i++) {
		f[i] = original_f.at(i);
	}

	switch (m_method) {
	case OBJ_CSTRS:
	{
		for (c_size_type i = 0; i < number_of_constraints; i++) {
			if (c.at(i) > 0.) {
				f[original_nbr_obj + i] = c.at(i);
			} else if (number_of_violated_constraints != 0) {
				f[original_nbr_obj + i] = number_of_violated_constraints;
			} else {
				f[original_nbr_obj + i] = 0.;
				for (f_size_type j = 0; j < original_nbr_obj; j++) {
					f[original_nbr_obj + i] += original_f.at(j);
				}
			}
		}
		break;
	}
	case OBJ_CSTRSVIO:
	{
		for (c_size_type i = 0; i < number_of_constraints; i++) {
			if (c.at(i) > 0.) {
				f[original_nbr_obj] += c.at(i);
			}
		}
		break;
	}
	case OBJ_EQVIO_INEQVIO:
	{
		// treating equality constraints
		for (c_size_type i = 0; i < number_of_eq_constraints; i++) {
			if (c.at(i) > 0.) {
				f[original_nbr_obj] += c.at(i);
			}
		}
		// treating inequality constraints
		for (c_size_type i = number_of_eq_constraints; i < number_of_constraints; i++) {
			if (c.at(i) > 0.) {
				f[original_nbr_obj + 1] += c.at(i);
			}
		}
		break;
	}
	default:
		pagmo_throw(value_error, "Error: there are only 3 methods for the constrained-to-multi-objective transformation!");
		break;
	}
}
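// Summary of the three transformations implemented above:
//   OBJ_CSTRS        : one extra objective per constraint (its violation if violated; otherwise
//                      the count of violated constraints if any other constraint is violated;
//                      otherwise the summed original objectives)
//   OBJ_CSTRSVIO     : a single extra objective holding the total constraint violation
//   OBJ_EQVIO_INEQVIO: two extra objectives, the summed equality violation and the summed
//                      inequality violation
// A minimal standalone sketch of the OBJ_CSTRSVIO aggregation (illustrative only), applied to
// an already tolerance-shifted constraint vector:
#include <cstddef>
#include <vector>

double total_violation_sketch(const std::vector<double> &c)
{
	double v = 0.0;
	for (std::size_t i = 0; i < c.size(); ++i) {
		if (c[i] > 0.) {
			v += c[i]; // only positive entries count as violations
		}
	}
	return v;
}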