Example #1
/// Returns the decision vector mapped back to the original space, ready to be
/// fed to the original problem
decision_vector rotated::derotate(const decision_vector& x_normed) const
{
	// The de-rotated point may land outside the original domain, because the
	// variable bounds were relaxed after rotation -- if so, step 3 projects it back.

	// 1. De-rotate the vector in the normalized space
	Eigen::VectorXd x_normed_vec = Eigen::VectorXd::Zero(x_normed.size());
	Eigen::VectorXd x_derotated_vec;
	for(base::size_type i = 0; i < x_normed.size(); i++){
		x_normed_vec(i) = x_normed[i];	
	}
	x_derotated_vec = m_InvRotate * x_normed_vec;

	// 2. De-normalize the de-rotated vector to the original bounds
	decision_vector x_derotated(x_normed.size(), 0);
	for(base::size_type i = 0; i < x_normed.size(); i++){
		x_derotated[i] = x_derotated_vec(i);
	}
	decision_vector x_wild = denormalize_to_original(x_derotated);

	// 3. The de-normalized vector may be out of bounds, if so project back in
	decision_vector x = projection_via_clipping(x_wild);

	return x;
}
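For reference, the three steps compose to the following map, writing $R^{-1}$ for m_InvRotate, $s$ and $t$ for the per-dimension normalization scale and translation (see denormalize_to_original in Example #10), and $\Pi$ for the clipping projection onto the original bounds:

\[ x = \Pi\big( s \odot (R^{-1} \hat{x}) + t \big) \]

where $\hat{x}$ is the normalized, rotated input and $\odot$ denotes the element-wise product.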
Example #2
/// Apply noise to the decision vector, with magnitude controlled by rho
void robust::inject_noise_x(decision_vector &x) const
{
	// We follow the algorithm at
	// http://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability

	// 0. Define the radius
	double radius = m_rho * pow(m_uniform_dist(m_drng),1.0/x.size());

	// 1. Sample N(0,1) in each dimension
	std::vector<double> perturbation(x.size(), 0.0);
	double c2=0;
	for(size_type i = 0; i < perturbation.size(); i++){
		perturbation[i] = m_normal_dist(m_drng);
		c2 += perturbation[i]*perturbation[i];
	}

	// 2. Scale the vector to the sampled radius and apply the perturbation
	for(size_type i = 0; i < perturbation.size(); i++){
		perturbation[i] *= (radius / sqrt(c2) );
		x[i] += perturbation[i];
	}

	// 3. Clip the variables to the valid bounds
	for(base::size_type i = 0; i < x.size(); i++){
		x[i] = std::max(x[i], get_lb()[i]);
		x[i] = std::min(x[i], get_ub()[i]);
	}
}
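A sketch of why this yields a uniform sample inside the ball of radius $\rho$ (per the derivation linked above): a normalized Gaussian vector $g/\lVert g \rVert$ is uniform on the unit sphere, and for a point uniform in a $d$-ball the radius has CDF $P(R \le r) = (r/\rho)^d$, which is inverted by $R = \rho\, U^{1/d}$ with $U \sim \mathcal{U}(0,1)$. The update is therefore

\[ x \leftarrow x + \rho\, U^{1/d}\, \frac{g}{\lVert g \rVert}, \qquad g \sim \mathcal{N}(0, I_d), \]

followed by clipping to the box bounds, which slightly distorts uniformity near the boundary.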
Example #3
// Normalizes a decision vector to [-1, 1] in each dimension,
// relative to the original upper and lower bounds
decision_vector rotated::normalize_to_center(const decision_vector& x) const
{
	decision_vector normalized_x(x.size(), 0);
	for(base::size_type i = 0; i < x.size(); i++) {
		if (m_normalize_scale[i] == 0) { // If the bounds width is zero
			normalized_x[i] = 0;
		} 
		else {
			normalized_x[i] = (x[i] - m_normalize_translation[i]) / m_normalize_scale[i];
		}
	}
	return normalized_x;
}
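In formula form, with $t_i$ = m_normalize_translation[i] and $s_i$ = m_normalize_scale[i] (presumably the mid-point and half-width of the original bounds, given the $[-1, 1]$ target -- an assumption, since their construction is not shown here):

\[ \hat{x}_i = \frac{x_i - t_i}{s_i}, \qquad s_i \neq 0. \]

denormalize_to_original in Example #10 applies the exact inverse affine map.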
Example #4
double antibodies_problem::compute_distance(const decision_vector &x) const {
	double distance = 0.;

	switch(m_method) {
	// Hamming-based measure: compare binary encodings bit by bit
	case(algorithm::cstrs_immune_system::HAMMING): {
		const decision_vector &lb = get_lb();
		const decision_vector &ub = get_ub();

		for(decision_vector::size_type i=0; i<x.size(); i++) {

			std::vector<int> current_binary_gene = double_to_binary(x.at(i), lb.at(i), ub.at(i));

			for(decision_vector::size_type j=0; j<m_pop_antigens.size(); j++) {

				std::vector<int> antigens_binary_gene = double_to_binary((m_pop_antigens.at(j)).at(i), lb.at(i), ub.at(i));

				for(std::vector<int>::size_type k=0; k<antigens_binary_gene.size(); k++) {
					distance += antigens_binary_gene.at(k) && current_binary_gene.at(k);
				}
			}
		}

		// we negate the distance, as the measure we need is how close
		// x is to the antigen population, i.e. we want to maximize
		// the resemblance
		distance = -distance;
		break;
	}
	case(algorithm::cstrs_immune_system::EUCLIDEAN): {
		for(decision_vector::size_type j=0; j<m_pop_antigens.size(); j++) {

			double euclid = 0.;
			const decision_vector &antigen_decision = m_pop_antigens.at(j);

			for(decision_vector::size_type i=0; i<x.size(); i++) {
				euclid += std::pow(x.at(i) - antigen_decision.at(i),2);
			}
			distance += std::sqrt(euclid);
		}

		break;
	}
	}

	return distance;
}
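In the EUCLIDEAN branch the measure written out is simply the summed distance from $x$ to every antigen $a_j$ in m_pop_antigens:

\[ d(x) = \sum_j \lVert x - a_j \rVert_2 = \sum_j \sqrt{\sum_i \big(x_i - a_{j,i}\big)^2}. \]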
Example #5
// Write into retval the gradient of the continuous part of the objective function of prob calculated in input.
void gsl_gradient::objfun_numdiff_central(gsl_vector *retval, const problem::base &prob, const decision_vector &input, const double &step_size)
{
	if (input.size() != prob.get_dimension()) {
		pagmo_throw(value_error,"invalid input vector dimension in numerical differentiation of the objective function");
	}
	if (prob.get_f_dimension() != 1) {
		pagmo_throw(value_error,"numerical differentiation of the objective function cannot work on multi-objective problems");
	}
	// Size of the continuous part of the problem.
	const problem::base::size_type cont_size = prob.get_dimension() - prob.get_i_dimension();
	// Structure to pass data to the wrapper.
	objfun_numdiff_wrapper_params pars;
	pars.x = input;
	pars.f.resize(1);
	pars.prob = &prob;
	// GSL function.
	gsl_function F;
	F.function = &objfun_numdiff_wrapper;
	F.params = (void *)&pars;
	double result, abserr;
	// Numerical differentiation component by component.
	for (problem::base::size_type i = 0; i < cont_size; ++i) {
		pars.coord = i;
		gsl_deriv_central(&F,input[i],step_size,&result,&abserr);
		gsl_vector_set(retval,i,result);
	}
}
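gsl_deriv_central is GSL's adaptive central-difference routine; the basic approximation behind it, applied here one coordinate at a time with $h$ = step_size and $e_i$ the $i$-th unit vector, is

\[ \frac{\partial f}{\partial x_i}(x) \approx \frac{f(x + h\, e_i) - f(x - h\, e_i)}{2h}, \]

with abserr receiving GSL's estimate of the absolute error (unused here).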
Example #6
/**
 * @return the encoded string (std::string)
 */
std::string string_match::pretty(const decision_vector &x) const {
	std::string retval;
	for (decision_vector::size_type i = 0; i < x.size(); ++i) {
		retval += char(x[i]);
	}
	return retval;
}
Example #7
/// Implementation of the objective function.
void lavor_maculan::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	const decision_vector::size_type n = x.size();
	f[0] = 0.0;
	for (decision_vector::size_type i = 0 ; i < n ; ++i) {
		f[0] += 1 + cos(3 * x[i]) + (((i % 2 == 1) ? 1 : -1) / (sqrt(10.60099896 - 4.141720682 * cos(x[i]))));
	}
}
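Written out, this is the Lavor-Maculan molecular conformation energy, with the loop's 0-based index $i$:

\[ f(x) = \sum_{i=0}^{n-1} \left[ 1 + \cos(3 x_i) + \frac{(-1)^{i+1}}{\sqrt{10.60099896 - 4.141720682\, \cos(x_i)}} \right]. \]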
Example #8
/// Implementation of the objective function.
void rosenbrock::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	const decision_vector::size_type n = x.size();
	f[0]=0;
	for (decision_vector::size_type i=0; i<n-1; ++i){
		f[0] += 100 * (x[i]*x[i] -x[i+1])*(x[i]*x[i] -x[i+1]) + (x[i]-1)*(x[i]-1);
	}
}
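For reference, this is the classic Rosenbrock function, whose global minimum $f = 0$ lies at $x_i = 1$ for all $i$:

\[ f(x) = \sum_{i=0}^{n-2} \left[ 100\,\big(x_i^2 - x_{i+1}\big)^2 + (x_i - 1)^2 \right]. \]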
Example #9
/// Implementation of the constraint function.
void luksan_vlcek_3::compute_constraints_impl(constraint_vector &c, const decision_vector &x) const
{
	int n = x.size();
	c[0] = 3.*std::pow(x[0],3) + 2.*x[1] - 5. + std::sin(x[0]-x[1])*std::sin(x[0]+x[1]) - m_cub[0];
	c[1] = m_clb[0] - ( 3.*std::pow(x[0],3) + 2.*x[1] - 5. + std::sin(x[0]-x[1])*std::sin(x[0]+x[1]) );
	c[2] = 4.*x[n-3] - x[n-4]*std::exp(x[n-4]-x[n-3]) - 3 - m_cub[1];
	c[3] = m_clb[1] - ( 4.*x[n-3] - x[n-4]*std::exp(x[n-4]-x[n-3]) - 3 );
}
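Each pair of entries encodes a double-sided constraint $c^{lb}_k \le g_k(x) \le c^{ub}_k$ as two one-sided inequalities, under the usual convention that an inequality constraint is satisfied when non-positive:

\[ g_k(x) - c^{ub}_k \le 0, \qquad c^{lb}_k - g_k(x) \le 0. \]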
Example #10
// For a decision_vector in the normalized [-1, 1] space,
// perform the inverse operation of normalization to get its
// location in the original space
decision_vector rotated::denormalize_to_original(const decision_vector& x_normed) const
{
	decision_vector denormalized_x(x_normed.size(), 0);
	for(base::size_type i = 0; i < x_normed.size(); i++){
		denormalized_x[i] = (x_normed[i] * m_normalize_scale[i]) + m_normalize_translation[i];
	}
	return denormalized_x;
}
Example #11
/// Implementation of the objective function.
void rastrigin::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	const double omega = 2.0 * boost::math::constants::pi<double>();
	f[0] = 0;
	const decision_vector::size_type n = x.size();
	for (decision_vector::size_type i = 0; i < n; ++i) {
		f[0] += x[i] * x[i] - 10.0 * std::cos(omega * x[i]);
	}
	f[0] += 10.0 * n;
}
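The code implements the standard Rastrigin function, with global minimum $f = 0$ at the origin:

\[ f(x) = 10 n + \sum_{i=1}^{n} \left[ x_i^2 - 10 \cos(2 \pi x_i) \right]. \]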
Example #12
/// Implementation of the objective function.
void michalewicz::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double retval = 0.0;

	for (decision_vector::size_type i=0; i<n; i++){
		retval -= sin(x[i]) * pow(sin((i+1)*x[i]*x[i]/boost::math::constants::pi<double>()) , 2*m_m);
	}
	f[0] = retval;
}
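Written out (1-based $i$, hence the code's i+1), with steepness parameter $m$ = m_m:

\[ f(x) = -\sum_{i=1}^{n} \sin(x_i)\, \sin^{2m}\!\left( \frac{i\, x_i^2}{\pi} \right). \]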
Example #13
/// Implementation of the objective function.
void schwefel::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	std::vector<double>::size_type n = x.size();
	double value=0;

	for (std::vector<double>::size_type i=0; i<n; i++){
		value += x[i] * sin(sqrt(fabs(x[i])));
	}
	f[0] = 418.9828872724338 * n - value;
}
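This is the Schwefel function; the constant 418.9828872724338 shifts the global minimum to $f = 0$, attained near $x_i \approx 420.9687$:

\[ f(x) = 418.9829\, n - \sum_{i=1}^{n} x_i \sin\!\big( \sqrt{|x_i|} \big). \]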
Example #14
/// Implementation of the objective function.
void dejong::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double retval = 0.0;

	for (decision_vector::size_type i=0; i<n; i++){
		retval += x[i]*x[i];
	}
	f[0] = retval;
}
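De Jong's first function is simply the sphere, minimized at the origin:

\[ f(x) = \sum_{i=1}^{n} x_i^2. \]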
Example #15
/// Implementation of the objective function.
void luksan_vlcek_3::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	f[0] = 0.;
	for (decision_vector::size_type i=0; i<(x.size()-2)/2; i++)
	{
		double a1 = x[2*i]+10.*x[2*i+1];
		double a2 = x[2*i+2] - x[2*i+3];
		double a3 = x[2*i+1] - 2.*x[2*i+2];
		double a4 = x[2*i] - x[2*i+3];
		f[0] += a1*a1 + 5.*a2*a2 + std::pow(a3,4)+ 10.*std::pow(a4,4);
	}
}
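Reading the loop back as a formula (the pairwise structure suggests a chained variant of Powell's singular function):

\[ f(x) = \sum_{i=0}^{(n-2)/2 - 1} \left[ (x_{2i} + 10 x_{2i+1})^2 + 5\,(x_{2i+2} - x_{2i+3})^2 + (x_{2i+1} - 2 x_{2i+2})^4 + 10\,(x_{2i} - x_{2i+3})^4 \right]. \]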
Example #16
/// Implementation of the objective function.
void zdt2::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2);
	pagmo_assert(x.size() == 30);

	double g = 0;

	f[0] = x[0];

	// g sums x[1] .. x[29]: the ZDT2 definition uses every variable except the first
	for(problem::base::size_type i = 1; i < 30; ++i) {
		g += x[i];
	}
	g = 1 + (9 * g) / 29;

	f[1] = g * ( 1 - (x[0]/g)*(x[0]/g));
}
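For reference, the ZDT2 definition implemented above ($n = 30$, 1-based indices, hence the 0-based loop starting at 1):

\[ f_1 = x_1, \qquad g = 1 + \frac{9}{n-1} \sum_{i=2}^{n} x_i, \qquad f_2 = g \left[ 1 - (f_1/g)^2 \right]. \]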
Example #17
/// Implementation of the objective function.
void zdt6::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2);
	pagmo_assert(x.size() == 10);

	double g = 0;

	f[0] = 1 - exp(-4*x[0])*pow(sin(6*m_pi*x[0]),6);

	// g sums x[1] .. x[9]; ZDT6 raises the scaled sum to the power 0.25
	for(problem::base::size_type i = 1; i < 10; ++i) {
		g += x[i];
	}
	g = 1 + 9 * pow(g / 9, 0.25);

	f[1] = g * ( 1 - (f[0]/g)*(f[0]/g));
}
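The ZDT6 definition ($n = 10$), which is where the 0.25 exponent in g comes from:

\[ f_1 = 1 - e^{-4 x_1} \sin^6(6 \pi x_1), \qquad g = 1 + 9 \left[ \frac{1}{n-1} \sum_{i=2}^{n} x_i \right]^{0.25}, \qquad f_2 = g \left[ 1 - (f_1/g)^2 \right]. \]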
Example #18
/// Implementation of the objective function.
void levy5::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 1);
	decision_vector::size_type n = x.size();
	double isum = 0.0;
	double jsum = 0.0;
	f[0] = 0;

	// The problem assumes an even dimension: variables are taken in consecutive pairs
	for ( decision_vector::size_type j=0; j<n; j+=2 ) {
		for ( int i=1; i<=5; i++ ) {
			isum += (double)(i) * cos((double)(i-1)*x[j] + (double)(i));
			jsum += (double)(i) * cos((double)(i+1)*x[j+1] + (double)(i));
		}
	}

	f[0] = isum*jsum;
	for ( decision_vector::size_type j=0; j<n; j+=2 ) {
		f[0] += pow(x[j] + 1.42513,2) + pow(x[j+1] + 0.80032,2);
	}
}
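A transcription of the code, which generalizes the two-dimensional Levy N.5 over the consecutive pairs $(x_j, x_{j+1})$:

\[ f(x) = \left[ \sum_{\text{pairs}} \sum_{i=1}^{5} i \cos\big((i-1) x_j + i\big) \right] \left[ \sum_{\text{pairs}} \sum_{i=1}^{5} i \cos\big((i+1) x_{j+1} + i\big) \right] + \sum_{\text{pairs}} \left[ (x_j + 1.42513)^2 + (x_{j+1} + 0.80032)^2 \right]. \]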
Example #19
/// Implementation of the objective function.
void sch::objfun_impl(fitness_vector &f, const decision_vector &x) const
{
	pagmo_assert(f.size() == 2 && x.size() == 1);
	f[0] = x[0]*x[0];
	f[1] = (x[0]-2) * (x[0]-2);
}
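This is Schaffer's classic bi-objective study problem, whose Pareto-optimal set is $x \in [0, 2]$:

\[ f_1(x) = x^2, \qquad f_2(x) = (x - 2)^2. \]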