Example #1
void ms::evolve(population &pop) const
{
	// Let's store some useful variables.
	const population::size_type NP = pop.size();

	// Get out if there is nothing to do.
	if (m_starts == 0 || NP == 0) {
		return;
	}

	// Local population used in the algorithm iterations.
	population working_pop(pop);

	//ms main loop
	for (int i = 0; i < m_starts; ++i)
	{
		working_pop.reinit();
		m_algorithm->evolve(working_pop);
		if (working_pop.problem().compare_fc(working_pop.get_individual(working_pop.get_best_idx()).cur_f,working_pop.get_individual(working_pop.get_best_idx()).cur_c,
			pop.get_individual(pop.get_worst_idx()).cur_f,pop.get_individual(pop.get_worst_idx()).cur_c
		) )
		{
			//update best population replacing its worst individual with the good one just produced.
			pop.set_x(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_x);
			pop.set_v(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_v);
		}
		if (m_screen_output)
		{
			std::cout << i << ". " << "\tCurrent iteration best: " << working_pop.get_individual(working_pop.get_best_idx()).cur_f << "\tOverall champion: " << pop.champion().f << std::endl;
		}
	}
}
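The loop above restarts the wrapped algorithm m_starts times on a reinitialised copy of the population and overwrites the worst incumbent whenever a restart finds something better. Below is a minimal standalone sketch of the same multistart pattern in plain standard C++; it is not PaGMO code, and run_one_start with its crude local probe is only an illustrative stand-in for the inner algorithm.

#include <algorithm>
#include <functional>
#include <limits>
#include <random>

// Placeholder inner solver: a random start followed by a few cheap local probes.
double run_one_start(std::mt19937 &rng, const std::function<double(double)> &objective)
{
	double x = std::uniform_real_distribution<double>(-10.0, 10.0)(rng);
	double f = objective(x);
	for (int it = 0; it < 100; ++it) {
		const double cand = x + std::uniform_real_distribution<double>(-0.1, 0.1)(rng);
		const double fc = objective(cand);
		if (fc < f) {
			x = cand;
			f = fc;
		}
	}
	return f;
}

// Multistart driver: keep the best value over n_starts independent restarts,
// mirroring the worst-individual replacement in the evolve method above.
double multistart(int n_starts, const std::function<double(double)> &objective)
{
	std::mt19937 rng(42);
	double best = std::numeric_limits<double>::infinity();
	for (int i = 0; i < n_starts; ++i) {
		best = std::min(best, run_one_start(rng, objective));
	}
	return best;
}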
Example #2
/// Evolve method.
void monte_carlo::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type prob_dimension = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type pop_size = pop.size();
	// Get out if there is nothing to do.
	if (pop_size == 0 || m_max_eval == 0) {
		return;
	}
	// Initialise temporary decision vector, fitness vector and constraint vector.
	decision_vector tmp_x(prob_dimension);
	fitness_vector tmp_f(prob.get_f_dimension());
	constraint_vector tmp_c(prob.get_c_dimension());
	// Main loop.
	for (std::size_t i = 0; i < m_max_eval; ++i) {
		// Generate a random decision vector.
		for (problem::base::size_type j = 0; j < prob_dimension - prob_i_dimension; ++j) {
			tmp_x[j] = boost::uniform_real<double>(lb[j],ub[j])(m_drng);
		}
		for (problem::base::size_type j = prob_dimension - prob_i_dimension; j < prob_dimension; ++j) {
			tmp_x[j] = boost::uniform_int<int>(lb[j],ub[j])(m_urng);
		}
		// Compute fitness and constraints.
		prob.objfun(tmp_f,tmp_x);
		prob.compute_constraints(tmp_c,tmp_x);
		// Locate the worst individual.
		const population::size_type worst_idx = pop.get_worst_idx();
		if (prob.compare_fc(tmp_f,tmp_c,pop.get_individual(worst_idx).cur_f,pop.get_individual(worst_idx).cur_c)) {
			pop.set_x(worst_idx,tmp_x);
		}
	}
}
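A minimal standalone sketch of the same pure random search (not PaGMO code): sample uniformly inside the box bounds and keep the best sample, which plays the role of the worst-individual replacement above; the sphere objective is just a stand-in.

#include <iostream>
#include <limits>
#include <random>
#include <vector>

int main()
{
	const std::vector<double> lb = {-5.0, -5.0}, ub = {5.0, 5.0};
	std::mt19937 rng(0);
	// Sphere function used as a stand-in objective.
	auto objfun = [](const std::vector<double> &x) {
		double s = 0.0;
		for (double xi : x) {
			s += xi * xi;
		}
		return s;
	};
	std::vector<double> best;
	double best_f = std::numeric_limits<double>::infinity();
	for (int eval = 0; eval < 1000; ++eval) {
		std::vector<double> x(lb.size());
		for (std::size_t j = 0; j < lb.size(); ++j) {
			x[j] = std::uniform_real_distribution<double>(lb[j], ub[j])(rng);
		}
		const double f = objfun(x);
		if (f < best_f) { // keep the sample only if it improves on the incumbent
			best_f = f;
			best = x;
		}
	}
	std::cout << "best value found: " << best_f << "\n";
	return 0;
}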
Example #3
std::vector<population::individual_type> best_kill_s_policy::select(population &pop) const
{
	pagmo_assert(get_n_individuals(pop) <= pop.size());
	// Gets the number of individuals to select
	const population::size_type migration_rate = get_n_individuals(pop);
	// Create a temporary array of individuals.
	std::vector<population::individual_type> result;
	// Get the indices of the best individuals
	std::vector<population::size_type> best_idx = pop.get_best_idx(migration_rate);
	// Put the best individuals into the result vector
	for (population::size_type i = 0; i < migration_rate; ++i) {
		result.push_back(pop.get_individual(best_idx[i]));
	}
	// Remove them from the original population 
	// (note: the champion will still carry information on the best guy ...)
	for (population::size_type i = 0; i < migration_rate; ++i) {
		pop.reinit(best_idx[i]);
	}
	return result;
}
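A standalone sketch of the same "emigrate the k best, then reinitialise them" idea (not PaGMO code); individuals are reduced to bare fitness values and the reinitialisation is a uniform redraw, both simplifications over the population machinery above.

#include <algorithm>
#include <cassert>
#include <numeric>
#include <random>
#include <vector>

// Returns the k best (smallest) fitness values and overwrites them in place with fresh random draws.
std::vector<double> select_and_reinit_best(std::vector<double> &fitness, std::size_t k, std::mt19937 &rng)
{
	assert(k <= fitness.size());
	std::vector<std::size_t> idx(fitness.size());
	std::iota(idx.begin(), idx.end(), 0);
	// Bring the indices of the k smallest fitness values to the front.
	std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
		[&fitness](std::size_t a, std::size_t b) { return fitness[a] < fitness[b]; });
	std::vector<double> migrants;
	migrants.reserve(k);
	for (std::size_t i = 0; i < k; ++i) {
		migrants.push_back(fitness[idx[i]]);                                       // "emigrate" the best
		fitness[idx[i]] = std::uniform_real_distribution<double>(0.0, 100.0)(rng); // reinitialise it
	}
	return migrants;
}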
Example #4
/**
 * Updates the constraints scaling vector with the given population.
 * @param[in] pop population used to compute the constraints scaling.
 */
void cstrs_self_adaptive::update_c_scaling(const population &pop)
{
	if(*m_original_problem != pop.problem()) {
		pagmo_throw(value_error,"The problem linked to the population is not the same as the problem given in argument.");
	}

	// Let's store some useful variables.
	const population::size_type pop_size = pop.size();

	// get the constraints dimension
	//constraint_vector c(m_original_problem->get_c_dimension(), 0.);
	problem::base::c_size_type prob_c_dimension = m_original_problem->get_c_dimension();
	problem::base::c_size_type number_of_eq_constraints =
			m_original_problem->get_c_dimension() -
			m_original_problem->get_ic_dimension();

	const std::vector<double> &c_tol = m_original_problem->get_c_tol();

	m_c_scaling.resize(m_original_problem->get_c_dimension());
	std::fill(m_c_scaling.begin(),m_c_scaling.end(),0.);

	// evaluates the scaling factor
	for(population::size_type i=0; i<pop_size; i++) {
		// get the current individual and its constraint vector
		const population::individual_type &current_individual = pop.get_individual(i);

		const constraint_vector &c = current_individual.cur_c;

		// computes scaling with the right definition of the constraints (can be in base problem? currently used
		// by con2mo as well)
		for(problem::base::c_size_type j=0; j<number_of_eq_constraints; j++) {
			m_c_scaling[j] = std::max(m_c_scaling[j], std::max(0., (std::abs(c.at(j)) - c_tol.at(j))) );
		}
		for(problem::base::c_size_type j=number_of_eq_constraints; j<prob_c_dimension; j++) {
			m_c_scaling[j] = std::max(m_c_scaling[j], std::max(0., c.at(j) - c_tol.at(j)) );
		}
	}
}
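The double loop above boils down to a per-constraint maximum of violations over the whole population. A standalone sketch of that rule (not PaGMO code), assuming the constraints of each individual are stored as one vector and the first n_eq entries are equality constraints:

#include <algorithm>
#include <cmath>
#include <vector>

std::vector<double> constraint_scaling(const std::vector<std::vector<double> > &c, // c[i][j]: constraint j of individual i
	const std::vector<double> &tol, std::size_t n_eq)
{
	std::vector<double> scaling(tol.size(), 0.0);
	for (std::size_t i = 0; i < c.size(); ++i) {
		for (std::size_t j = 0; j < tol.size(); ++j) {
			// Equality constraints violate by |c| - tol, inequality constraints by c - tol.
			const double viol = (j < n_eq) ? std::fabs(c[i][j]) - tol[j] : c[i][j] - tol[j];
			scaling[j] = std::max(scaling[j], std::max(0.0, viol));
		}
	}
	return scaling;
}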
Example #5
void sa_corana::evolve(population &pop) const {

	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	const problem::base::size_type Dc = D - prob_i_dimension;

	//We perform some checks to determine whether the problem/population are suitable for sa_corana
	if ( Dc == 0 ) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for sa_corana to optimise");
	}

	if ( prob_c_dimension != 0 ) {
		pagmo_throw(value_error,"The problem is not box constrained and sa_corana is not suitable to solve it");
	}

	if ( prob_f_dimension != 1 ) {
		pagmo_throw(value_error,"The problem is not single objective and sa_corana is not suitable to solve it");
	}

	//Determines the number of temperature adjustments for the annealing procedure
	const size_t n_T = m_niter / (m_step_adj * m_bin_size * Dc);

	// Get out if there is nothing to do.
	if (NP == 0 || m_niter == 0) {
		return;
	}
	if (n_T == 0) {
		pagmo_throw(value_error,"n_T is zero, increase niter");
	}

	//Starting point is the best individual
	const int bestidx = pop.get_best_idx();
	const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
	const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;
	//Determines the coefficient to decrease the temperature
	const double Tcoeff = std::pow(m_Tf/m_Ts,1.0/(double)(n_T));
	//Stores the current and new points
	decision_vector xNEW = x0, xOLD = xNEW;
	fitness_vector fNEW = fit0, fOLD = fNEW;
	//Stores the adaptive steps of each component (integer part included but not used)
	decision_vector step(D,m_range);

	//Stores the number of accepted points per component (integer part included but not used)
	std::vector<int> acp(D,0) ;
	double ratio = 0, currentT = m_Ts, probab = 0;

	//Main SA loops
	for (size_t jter = 0; jter < n_T; ++jter) {
		for (int mter = 0; mter < m_step_adj; ++mter) {
			for (int kter = 0; kter < m_bin_size; ++kter) {
				size_t nter = boost::uniform_int<int>(0,Dc-1)(m_urng);
				for (size_t numb = 0; numb < Dc ; ++numb) {
					nter = (nter + 1) % Dc;
					//We modify the current point xOLD by mutating its nter-th component within
					//a step that we will later adapt
					xNEW[nter] = xOLD[nter] + boost::uniform_real<double>(-1,1)(m_drng) * step[nter] * (ub[nter]-lb[nter]);

					// If new solution produced is infeasible ignore it
					if ((xNEW[nter] > ub[nter]) || (xNEW[nter] < lb[nter])) {
						xNEW[nter]=xOLD[nter];
						continue;
					}
					//And we evaluate the objective function for the new point
					prob.objfun(fNEW,xNEW);

					// We decide whether to accept or discard the point
					if (prob.compare_fitness(fNEW,fOLD) ) {
						//accept
						xOLD[nter] = xNEW[nter];
						fOLD = fNEW;
						acp[nter]++;	//Increase the number of accepted values
					} else {
						//test it with Boltzmann to decide the acceptance
						probab = exp ( - fabs(fOLD[0] - fNEW[0] ) / currentT );

						// we compare probab with a uniform random number in [0,1).
						if (probab > m_drng()) {
							xOLD[nter] = xNEW[nter];
							fOLD = fNEW;
							acp[nter]++;	//Increase the number of accepted values
						} else {
							xNEW[nter] = xOLD[nter];
						}
					} // end if
				} // end for(nter = 0; ...
			} // end for(kter = 0; ...
			// adjust the step (adaptively)
			for (size_t iter = 0; iter < Dc; ++iter) {
				ratio = (double)acp[iter]/(double)m_bin_size;
				acp[iter] = 0;  //reset the counter
				if (ratio > .6) {
					//too many acceptances, increase the step by a factor 3 maximum
					step[iter] = step [iter] * (1 + 2 *(ratio - .6)/.4);
				} else {
					if (ratio < .4) {
						//too few acceptances, decrease the step by a factor 3 maximum
						step[iter] = step[iter] / (1 + 2 * ((.4 - ratio)/.4));
					}
				}
				//And if it becomes too large, reset it to its initial value
				if ( step[iter] > m_range ) {
					step[iter] = m_range;
				}
			}
		}
		// Cooling schedule
		currentT *= Tcoeff;
	}
	if ( prob.compare_fitness(fOLD,fit0) ){
		pop.set_x(bestidx,xOLD); //new evaluation is possible here......
		std::transform(xOLD.begin(), xOLD.end(), pop.get_individual(bestidx).cur_x.begin(), xOLD.begin(),std::minus<double>());
		pop.set_v(bestidx,xOLD);
	}
}
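Two numerical ingredients of the loop above can be read in isolation: the geometric cooling coefficient Tcoeff = (Tf/Ts)^(1/n_T), chosen so that n_T multiplicative steps take the temperature from Ts down to Tf, and the Boltzmann test that accepts a worsening move with probability exp(-|f_old - f_new|/T). A standalone sketch (not PaGMO code):

#include <cmath>
#include <cstddef>
#include <random>

// Cooling coefficient: after n_T multiplications the temperature Ts becomes Tf.
double cooling_coefficient(double Ts, double Tf, std::size_t n_T)
{
	return std::pow(Tf / Ts, 1.0 / static_cast<double>(n_T));
}

// Metropolis/Boltzmann acceptance of a worsening move at temperature T.
bool accept_worse(double f_old, double f_new, double T, std::mt19937 &rng)
{
	const double probab = std::exp(-std::fabs(f_old - f_new) / T);
	return probab > std::uniform_real_distribution<double>(0.0, 1.0)(rng);
}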
Example #6
/**
 * Runs the NN_TSP algorithm.
 *
 * @param[in,out] pop input/output pagmo::population to be evolved.
 */
void nn_tsp::evolve(population &pop) const
{
	const problem::base_tsp* prob;
	//check if problem is of type pagmo::problem::base_tsp
	try
	{
	    prob = &dynamic_cast<const problem::base_tsp &>(pop.problem());
	}
	catch (const std::bad_cast& e)
	{
		pagmo_throw(value_error,"Problem not of type pagmo::problem::tsp, nn_tsp can only be called on problem::tsp problems");
	}

	// Let's store some useful variables.
	const problem::base::size_type Nv = prob->get_n_cities();

	//create individuals
	decision_vector best_tour(Nv);
	decision_vector new_tour(Nv);

	//check input parameter
	if (m_start_city < -1 || m_start_city > static_cast<int>(Nv-1)) {
		pagmo_throw(value_error,"invalid value for the first vertex");
	}


	size_t first_city, Nt;
	if (m_start_city == -1) {
		first_city = 0;
		Nt = Nv;
	} else {
		first_city = m_start_city;
		Nt = m_start_city + 1;
	}

	int length_best_tour, length_new_tour;
	size_t nxt_city, min_idx;
	std::vector<int> not_visited(Nv);
	length_best_tour = 0;

	//main loop
	for (size_t i = first_city; i < Nt; i++) {
		length_new_tour = 0;
		for (size_t j = 0; j < Nv; j++) {
			not_visited[j] = j;
		}
		new_tour[0] = i;
		std::swap(not_visited[new_tour[0]],not_visited[Nv-1]);
		for (size_t j = 1; j < Nv-1; j++) {
			min_idx = 0;
			nxt_city = not_visited[0];
			for (size_t l = 1; l < Nv-j; l++) {
				if (prob->distance(new_tour[j-1], not_visited[l]) < prob->distance(new_tour[j-1], nxt_city)) {
					min_idx = l;
					nxt_city = not_visited[l];
				}
			}
			new_tour[j] = nxt_city;
			length_new_tour += prob->distance(new_tour[j-1], nxt_city);
			std::swap(not_visited[min_idx],not_visited[Nv-j-1]);
		}
		new_tour[Nv-1] = not_visited[0];
		length_new_tour += prob->distance(new_tour[Nv-2], new_tour[Nv-1]);
		length_new_tour += prob->distance(new_tour[Nv-1], new_tour[0]);
		if(i == first_city || length_new_tour < length_best_tour){
			best_tour = new_tour;
			length_best_tour = length_new_tour;
		}
	}
		
	//change representation of tour
	population::size_type best_idx = pop.get_best_idx();
	switch( prob->get_encoding() ) {
	    case problem::base_tsp::FULL:
	        pop.set_x(best_idx,prob->cities2full(best_tour));
	        break;
	    case problem::base_tsp::RANDOMKEYS:
	        pop.set_x(best_idx,prob->cities2randomkeys(best_tour,pop.get_individual(best_idx).cur_x));
	        break;
	    case problem::base_tsp::CITIES:
	        pop.set_x(best_idx,best_tour);
	        break;
	}

} // end of evolve
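A standalone sketch of the nearest-neighbour tour construction used above (not PaGMO code), written against a plain distance matrix instead of problem::base_tsp: the tour grows by always appending the closest unvisited city.

#include <cstddef>
#include <vector>

// Assumes a square distance matrix with at least one city and start < dist.size().
std::vector<std::size_t> nearest_neighbour_tour(const std::vector<std::vector<double> > &dist, std::size_t start)
{
	const std::size_t n = dist.size();
	std::vector<bool> visited(n, false);
	std::vector<std::size_t> tour;
	tour.reserve(n);
	tour.push_back(start);
	visited[start] = true;
	for (std::size_t step = 1; step < n; ++step) {
		const std::size_t last = tour.back();
		std::size_t next = n; // sentinel: no city chosen yet
		for (std::size_t c = 0; c < n; ++c) {
			if (!visited[c] && (next == n || dist[last][c] < dist[last][next])) {
				next = c; // closest unvisited city found so far
			}
		}
		tour.push_back(next);
		visited[next] = true;
	}
	return tour;
}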
Example #7
/**
 * The best member of the population will be used as starting point for the minimisation process. The algorithm will stop
 * if the gradient falls below the grad_tol parameter, if the maximum number of iterations max_iter is exceeded or if
 * the inner GSL routine call reports an error (which will be logged on std::cout). After the end of the minimisation process,
 * the minimised decision vector will replace the best individual in the population, after being modified to fall within
 * the problem bounds if necessary.
 *
 * @param[in,out] pop population to evolve.
 */
void gsl_gradient::evolve(population &pop) const
{
	// Do nothing if the population is empty.
	if (!pop.size()) {
		return;
	}
	// Useful variables.
	const problem::base &problem = pop.problem();
	if (problem.get_f_dimension() != 1) {
		pagmo_throw(value_error,"this algorithm does not support multi-objective optimisation");
	}
	if (problem.get_c_dimension()) {
		pagmo_throw(value_error,"this algorithm does not support constrained optimisation");
	}
	const problem::base::size_type cont_size = problem.get_dimension() - problem.get_i_dimension();
	if (!cont_size) {
		pagmo_throw(value_error,"the problem has no continuous part");
	}
	// Extract the best individual.
	const population::size_type best_ind_idx = pop.get_best_idx();
	const population::individual_type &best_ind = pop.get_individual(best_ind_idx);
	// GSL wrapper parameters structure.
	objfun_wrapper_params params;
	params.p = &problem;
	// Integer part of the temporary decision vector must be filled with the integer part of the best individual,
	// which will not be optimised.
	params.x.resize(problem.get_dimension());
	std::copy(best_ind.cur_x.begin() + cont_size, best_ind.cur_x.end(), params.x.begin() + cont_size);
	params.f.resize(1);
	params.step_size = m_numdiff_step_size;
	// GSL function structure.
	gsl_multimin_function_fdf gsl_func;
	gsl_func.n = boost::numeric_cast<std::size_t>(cont_size);
	gsl_func.f = &objfun_wrapper;
	gsl_func.df = &d_objfun_wrapper;
	gsl_func.fdf = &fd_objfun_wrapper;
	gsl_func.params = (void *)&params;
	// Minimiser.
	gsl_multimin_fdfminimizer *s = 0;
	// This will be the starting point.
	gsl_vector *x = 0;
	// Here we start the allocations.
	// Recast as size_t here, in order to avoid potential overflows later.
	const std::size_t s_cont_size = boost::numeric_cast<std::size_t>(cont_size);
	// Allocate and check the allocation results.
	x = gsl_vector_alloc(s_cont_size);
	const gsl_multimin_fdfminimizer_type *minimiser = get_gsl_minimiser_ptr();
	pagmo_assert(minimiser);
	s = gsl_multimin_fdfminimizer_alloc(minimiser,s_cont_size);
	// Check the allocations.
	check_allocs(x,s);
	// Fill in the starting point (from the best individual).
	for (std::size_t i = 0; i < s_cont_size; ++i) {
		gsl_vector_set(x,i,best_ind.cur_x[i]);
	}
	// Init the solver.
	gsl_multimin_fdfminimizer_set(s,&gsl_func,x,m_step_size,m_tol);
	// Iterate.
	std::size_t iter = 0;
	int status;
	try {
		do
		{
			++iter;
			status = gsl_multimin_fdfminimizer_iterate(s);
			if (status) {
				break;
			}
			status = gsl_multimin_test_gradient(s->gradient,m_grad_tol);
		} while (status == GSL_CONTINUE && iter < m_max_iter);
	} catch (const std::exception &) {
		// Cleanup and re-throw the original exception; a plain throw avoids slicing it down to std::exception.
		cleanup(x,s);
		throw;
	} catch (...) {
		// Cleanup and throw.
		cleanup(x,s);
		pagmo_throw(std::runtime_error,"unknown exception caught in gsl_gradient::evolve");
	}
	// Free up resources.
	cleanup(x,s);
	// Check the generated individual and change it to respect the bounds as necessary.
	for (problem::base::size_type i = 0; i < cont_size; ++i) {
		if (params.x[i] < problem.get_lb()[i]) {
			params.x[i] = problem.get_lb()[i];
		}
		if (params.x[i] > problem.get_ub()[i]) {
			params.x[i] = problem.get_ub()[i];
		}
	}
	// Replace the best individual.
	pop.set_x(best_ind_idx,params.x);
}
Example #8
File: ihs.cpp  Project: YS-L/pagmo
void ihs::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type prob_dimension = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type pop_size = pop.size();
	// Get out if there is nothing to do.
	if (pop_size == 0 || m_gen == 0) {
		return;
	}
	decision_vector lu_diff(prob_dimension);
	for (problem::base::size_type i = 0; i < prob_dimension; ++i) {
		lu_diff[i] = ub[i] - lb[i];
	}
	// Int distribution to be used when picking random individuals.
	boost::uniform_int<population::size_type> uni_int(0,pop_size - 1);
	const double c = std::log(m_bw_min/m_bw_max) / m_gen;
	// Temporary individual used during evolution.
	population::individual_type tmp;
	tmp.cur_x.resize(prob_dimension);
	tmp.cur_f.resize(prob.get_f_dimension());
	tmp.cur_c.resize(prob.get_c_dimension());
	for (std::size_t g = 0; g < m_gen; ++g) {
		const double ppar_cur = m_ppar_min + ((m_ppar_max - m_ppar_min) * g) / m_gen, bw_cur = m_bw_max * std::exp(c * g);
		// Continuous part.
		for (problem::base::size_type i = 0; i < prob_dimension - prob_i_dimension; ++i) {
			if (m_drng() < m_phmcr) {
				// tmp's i-th chromosome element is the one from a randomly chosen individual.
				tmp.cur_x[i] = pop.get_individual(uni_int(m_urng)).cur_x[i];
				// Do pitch adjustment with ppar_cur probability.
				if (m_drng() < ppar_cur) {
					// Randomly, add or subtract pitch from the current chromosome element.
					if (m_drng() > .5) {
						tmp.cur_x[i] += m_drng() * bw_cur * lu_diff[i];
					} else {
						tmp.cur_x[i] -= m_drng() * bw_cur * lu_diff[i];
					}
					// Handle the case in which we added or subtracted too much and ended up out
					// of boundaries.
					if (tmp.cur_x[i] > ub[i]) {
						tmp.cur_x[i] = boost::uniform_real<double>(lb[i],ub[i])(m_drng);
					} else if (tmp.cur_x[i] < lb[i]) {
						tmp.cur_x[i] = boost::uniform_real<double>(lb[i],ub[i])(m_drng);
					}
				}
			} else {
				// Pick randomly within the bounds.
				tmp.cur_x[i] = boost::uniform_real<double>(lb[i],ub[i])(m_drng);
			}
		}

		//Integer Part
		for (problem::base::size_type i = prob_dimension - prob_i_dimension; i < prob_dimension; ++i) {
			if (m_drng() < m_phmcr) {
				tmp.cur_x[i] = pop.get_individual(uni_int(m_urng)).cur_x[i];
				if (m_drng() < ppar_cur) {
					if (m_drng() > .5) {
						tmp.cur_x[i] += double_to_int::convert(m_drng() * bw_cur * lu_diff[i]);
					} else {
						tmp.cur_x[i] -= double_to_int::convert(m_drng() * bw_cur * lu_diff[i]);
					}
					// Wrap over in case we went past the bounds.
					if (tmp.cur_x[i] > ub[i]) {
						tmp.cur_x[i] = lb[i] + double_to_int::convert(tmp.cur_x[i] - ub[i]) % static_cast<int>(lu_diff[i]);
					} else if (tmp.cur_x[i] < lb[i]) {
						tmp.cur_x[i] = ub[i] - double_to_int::convert(lb[i] - tmp.cur_x[i]) % static_cast<int>(lu_diff[i]);
					}
				}
			} else {
				// Pick randomly within the bounds.
				tmp.cur_x[i] = boost::uniform_int<int>(lb[i],ub[i])(m_urng);
			}
		}
		// And we push it back into the population
		pop.push_back(tmp.cur_x);
		// We locate the worst individual.
		const population::size_type worst_idx = pop.get_worst_idx();
		// And we get rid of it :)
		pop.erase(worst_idx);
	}
}
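The per-generation schedules driving the loop above are worth spelling out: the pitch-adjustment rate ppar_cur interpolates linearly between ppar_min and ppar_max, while the bandwidth bw_cur decays exponentially from bw_max towards bw_min through the coefficient c = log(bw_min/bw_max)/gen. A standalone sketch of just those two formulas (not PaGMO code):

#include <cmath>
#include <cstddef>

struct ihs_schedule {
	double ppar; // pitch-adjustment rate at generation g
	double bw;   // bandwidth at generation g
};

ihs_schedule schedule_at(std::size_t g, std::size_t gen, double ppar_min, double ppar_max, double bw_min, double bw_max)
{
	const double c = std::log(bw_min / bw_max) / static_cast<double>(gen);
	ihs_schedule s;
	s.ppar = ppar_min + ((ppar_max - ppar_min) * static_cast<double>(g)) / static_cast<double>(gen);
	s.bw = bw_max * std::exp(c * static_cast<double>(g));
	return s;
}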
Example #9
/**
 * Runs the Inverover algorithm for the number of generations specified in the constructor.
 *
 * @param[in,out] pop input/output pagmo::population to be evolved.
 */
void inverover::evolve(population &pop) const
{
	const problem::base_tsp* prob;
	//check if problem is of type pagmo::problem::base_tsp
	try {
		const problem::base_tsp& tsp_prob = dynamic_cast<const problem::base_tsp &>(pop.problem());
		prob = &tsp_prob;
	}
	catch (const std::bad_cast& e) {
		pagmo_throw(value_error,"Problem not of type pagmo::problem::base_tsp");
	}

	// Let's store some useful variables.
	const population::size_type NP = pop.size();
	const problem::base::size_type Nv = prob->get_n_cities();

	// Initializing the random number generators
	boost::uniform_real<double> uniform(0.0, 1.0);
	boost::variate_generator<boost::lagged_fibonacci607 &, boost::uniform_real<double> > unif_01(m_drng, uniform);
	boost::uniform_int<int> NPless1(0, NP - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_NPless1(m_urng, NPless1);
	boost::uniform_int<int> Nv_(0, Nv - 1);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nv(m_urng, Nv_);
	boost::uniform_int<int> Nvless1(0, Nv - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nvless1(m_urng, Nvless1);

	//create own local population
	std::vector<decision_vector> my_pop(NP, decision_vector(Nv));

	//check if some individuals in the population that is passed as a function input are feasible.
	bool feasible;
	std::vector<int> not_feasible;
	for (size_t i = 0; i < NP; i++) {
		feasible = prob->feasibility_x(pop.get_individual(i).cur_x);
		if(feasible) { //if feasible store it in my_pop
			switch(prob->get_encoding()) {
				case problem::base_tsp::FULL:
					my_pop[i] = prob->full2cities(pop.get_individual(i).cur_x);
					break;
				case problem::base_tsp::RANDOMKEYS:
					my_pop[i] = prob->randomkeys2cities(pop.get_individual(i).cur_x);
					break;
				case problem::base_tsp::CITIES:
					my_pop[i] = pop.get_individual(i).cur_x;
					break;
			}
		} else {
			not_feasible.push_back(i);
		}
	}

	//replace the not feasible individuals by feasible ones
	int i;
	switch (m_ini_type) {
		case 0:
		{
		//random initialization (produces feasible individuals)
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				for (size_t j = 0; j < Nv; j++) {
					my_pop[i][j] = j;
				}
			}
			int tmp;
			size_t rnd_idx;
			for (size_t j = 1; j < Nv-1; j++) {
				boost::uniform_int<int> dist_(j, Nv - 1);
				boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > dist(m_urng,dist_);
					
				for (size_t ii = 0; ii < not_feasible.size(); ii++) {
					i = not_feasible[ii];
					rnd_idx = dist();
					tmp = my_pop[i][j];
					my_pop[i][j] = my_pop[i][rnd_idx];
					my_pop[i][rnd_idx] = tmp;
				}

			}
			break;
		}
		case 1:
		{
		//initialize with nearest neighbor algorithm
			std::vector<int> starting_notes(std::max(Nv,not_feasible.size()));
			for (size_t j = 0; j < starting_notes.size(); j++) {
				starting_notes[j] = j;
			}
			//std::shuffle(starting_notes.begin(), starting_notes.end(), m_urng);
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				pagmo::population one_ind_pop(pop.problem(), 1);
				std::cout << starting_notes[i] << ' ';
				pagmo::algorithm::nn_tsp algo(starting_notes[i] % Nv);
				algo.evolve(one_ind_pop);
				switch( prob->get_encoding() ) {
					case problem::base_tsp::FULL:
						my_pop[i] = prob->full2cities(one_ind_pop.get_individual(0).cur_x);
						break;
					case problem::base_tsp::RANDOMKEYS:
						my_pop[i] = prob->randomkeys2cities(one_ind_pop.get_individual(0).cur_x);
						break;
					case problem::base_tsp::CITIES:
						my_pop[i] = one_ind_pop.get_individual(0).cur_x;
						break;
				}
				std::cout << i << ' ' << one_ind_pop.get_individual(0).cur_f << std::endl;
			}
			break;
		}
		default:
			pagmo_throw(value_error,"Invalid initialization type");
	}

	std::vector<fitness_vector>  fitness(NP, fitness_vector(1));
	for(size_t i=0; i < NP; i++){
		switch( prob->get_encoding() ) {
			case problem::base_tsp::FULL:
				fitness[i] = prob->objfun(prob->full2cities(my_pop[i]));
				break;
			case problem::base_tsp::RANDOMKEYS:
				fitness[i] = prob->objfun(prob->cities2randomkeys(my_pop[i], pop.get_individual(i).cur_x));
				break;
			case problem::base_tsp::CITIES:
				fitness[i] = prob->objfun(my_pop[i]);
				break;
		}
	}


	decision_vector tmp_tour(Nv);
	bool stop, changed;
	size_t rnd_num, i2, pos1_c1, pos1_c2, pos2_c1, pos2_c2; //pos2_c1 denotes the position of city1 in parent2
	fitness_vector fitness_tmp;

	//InverOver main loop
	for(int iter = 0; iter < m_gen; iter++) {
		for(size_t i1 = 0; i1 < NP; i1++) {
			tmp_tour = my_pop[i1];
			pos1_c1 = unif_Nv();
			stop = false;
			changed = false;
			while(!stop){
				if(unif_01() < m_ri) {
					rnd_num = unif_Nvless1();
					pos1_c2 = (rnd_num == pos1_c1? Nv-1:rnd_num);
				} else {
					i2 = unif_NPless1();
					i2 = (i2 == i1? NP-1:i2);
					pos2_c1 = std::find(my_pop[i2].begin(),my_pop[i2].end(),tmp_tour[pos1_c1])-my_pop[i2].begin();
					pos2_c2 = (pos2_c1 == Nv-1? 0:pos2_c1+1);
					pos1_c2 = std::find(tmp_tour.begin(),tmp_tour.end(),my_pop[i2][pos2_c2])-tmp_tour.begin();
				}
				stop = (abs(pos1_c1-pos1_c2)==1 || static_cast<problem::base::size_type>(abs(pos1_c1-pos1_c2))==Nv-1);
				if(!stop) {
					changed = true;
					if(pos1_c1<pos1_c2) {
						for(size_t l=0; l < (double (pos1_c2-pos1_c1-1)/2); l++) {
							std::swap(tmp_tour[pos1_c1+1+l],tmp_tour[pos1_c2-l]);
						}
						pos1_c1 = pos1_c2;
					} else {
						//inverts the section from c1 to c2 (see documentation Note3)
						for(size_t l=0; l < (double (pos1_c1-pos1_c2-1)/2); l++) {
							std::swap(tmp_tour[pos1_c2+l],tmp_tour[pos1_c1-l-1]);
						}
						pos1_c1 = (pos1_c2 == 0? Nv-1:pos1_c2-1);
					}
					
				}
			} //end of while loop (looping over a single individual)
			if(changed) {
				switch(prob->get_encoding()) {
					case problem::base_tsp::FULL:
						fitness_tmp = prob->objfun(prob->full2cities(tmp_tour));
						break;
					case problem::base_tsp::RANDOMKEYS: //using "randomly" index 0 as a temporary template
						fitness_tmp = prob->objfun(prob->cities2randomkeys(tmp_tour, pop.get_individual(0).cur_x));
						break;
					case problem::base_tsp::CITIES:
						fitness_tmp = prob->objfun(tmp_tour);
						break;
				}
				if(prob->compare_fitness(fitness_tmp,fitness[i1])) { //replace individual?
					my_pop[i1] = tmp_tour;
					fitness[i1][0] = fitness_tmp[0];
				}
			}
		} // end of loop over population
	} // end of loop over generations

	//change representation of tour
	for (size_t ii = 0; ii < NP; ii++) {
		switch(prob->get_encoding()) {
			case problem::base_tsp::FULL:
				pop.set_x(ii,prob->cities2full(my_pop[ii]));
				break;
			case problem::base_tsp::RANDOMKEYS:
				pop.set_x(ii,prob->cities2randomkeys(my_pop[ii],pop.get_individual(ii).cur_x));
				break;
			case problem::base_tsp::CITIES:
				pop.set_x(ii,my_pop[ii]);
				break;
		}
	}
} // end of evolve
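The core Inver-over move above is a segment reversal: the cities strictly after the first chosen position and up to the second one are flipped, with a separate branch handling segments that wrap around the end of the tour. A standalone sketch of the non-wrapping case (not PaGMO code):

#include <algorithm>
#include <cstddef>
#include <vector>

// Reverse the tour segment at positions pos1+1 .. pos2 (inclusive); assumes pos1 < pos2 < tour.size().
void invert_between(std::vector<int> &tour, std::size_t pos1, std::size_t pos2)
{
	std::reverse(tour.begin() + pos1 + 1, tour.begin() + pos2 + 1);
}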
Example #10
// Evolve method.
void base_nlopt::evolve(population &pop) const
{
	// Useful variables.
	const problem::base &problem = pop.problem();
	if (problem.get_f_dimension() != 1) {
		pagmo_throw(value_error,"this algorithm does not support multi-objective optimisation");
	}
	const problem::base::c_size_type c_size = problem.get_c_dimension();
	const problem::base::c_size_type ec_size = problem.get_c_dimension() - problem.get_ic_dimension();
	if (c_size && !m_constrained) {
		pagmo_throw(value_error,"this algorithm does not support constraints");
	}
	if (ec_size && m_only_ineq) {
		pagmo_throw(value_error,"this algorithm does not support equality constraints");
	}
	const problem::base::size_type cont_size = problem.get_dimension() - problem.get_i_dimension();
	if (!cont_size) {
		pagmo_throw(value_error,"the problem has no continuous part");
	}
	// Do nothing if the population is empty.
	if (!pop.size()) {
		return;
	}
	// Extract the best individual and set the initial point
	const population::size_type best_ind_idx = pop.get_best_idx();
	const population::individual_type &best_ind = pop.get_individual(best_ind_idx);

	
	// Structure to pass data to the objective function wrapper.
	nlopt_wrapper_data data_objfun;

	data_objfun.prob = &problem;
	data_objfun.x.resize(problem.get_dimension());
	data_objfun.dx.resize(problem.get_dimension());
	data_objfun.f.resize(1);
	
	// Structure to pass data to the constraint function wrapper.
	std::vector<nlopt_wrapper_data> data_constrfun(boost::numeric_cast<std::vector<nlopt_wrapper_data>::size_type>(c_size));
	for (problem::base::c_size_type i = 0; i < c_size; ++i) {
		data_constrfun[i].prob = &problem;
		data_constrfun[i].x.resize(problem.get_dimension());
		data_constrfun[i].dx.resize(problem.get_dimension());
		data_constrfun[i].c.resize(problem.get_c_dimension());
		data_constrfun[i].c_comp = i;
	}

	// Main NLopt call.
	nlopt::opt opt(m_algo, problem.get_dimension());
	m_opt = opt;
	// Sets local optimizer for aug_lag methods, do nothing otherwise
	set_local(problem.get_dimension());
	m_opt.set_lower_bounds(problem.get_lb());
	m_opt.set_upper_bounds(problem.get_ub());
	m_opt.set_min_objective(objfun_wrapper, &data_objfun);
	for (problem::base::c_size_type i =0; i<ec_size; ++i) {
		m_opt.add_equality_constraint(constraints_wrapper, &data_constrfun[i], problem.get_c_tol().at(i));
	}
	for (problem::base::c_size_type i =ec_size; i<c_size; ++i) {
		m_opt.add_inequality_constraint(constraints_wrapper, &data_constrfun[i], problem.get_c_tol().at(i));
	}

	m_opt.set_ftol_abs(m_ftol);
	m_opt.set_xtol_abs(m_xtol);
	m_opt.set_maxeval(m_max_iter);

	//nlopt::result result;
	double dummy;
	decision_vector x0(best_ind.cur_x);
	m_opt.optimize(x0, dummy);
	pop.set_x(best_ind_idx,x0);
}
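For reference, a minimal standalone NLopt example (independent of PaGMO) showing the same opt-object workflow used above: construct the optimiser, set bounds and the objective, set stopping criteria, then optimise in place. The algorithm choice, tolerances and the sphere objective are illustrative assumptions; check the calls against your installed nlopt.hpp.

#include <cstddef>
#include <nlopt.hpp>
#include <vector>

// Objective in NLopt's C++ callback form: return the value and (optionally) fill the gradient.
double sphere(const std::vector<double> &x, std::vector<double> &grad, void *)
{
	if (!grad.empty()) {
		for (std::size_t i = 0; i < x.size(); ++i) {
			grad[i] = 2.0 * x[i];
		}
	}
	double s = 0.0;
	for (std::size_t i = 0; i < x.size(); ++i) {
		s += x[i] * x[i];
	}
	return s;
}

int main()
{
	nlopt::opt opt(nlopt::LD_LBFGS, 2);
	opt.set_lower_bounds(std::vector<double>(2, -5.0));
	opt.set_upper_bounds(std::vector<double>(2, 5.0));
	opt.set_min_objective(sphere, NULL);
	opt.set_xtol_rel(1e-8);
	opt.set_maxeval(1000);
	std::vector<double> x(2, 1.0); // starting point, analogous to the best individual above
	double minf = 0.0;
	opt.optimize(x, minf);
	return 0;
}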
Example #11
    /**
     * Runs the Inverover algorithm for the number of generations specified in the constructor.
     *
     * @param[in,out] pop input/output pagmo::population to be evolved.
     */
    void inverover::evolve(population &pop) const
    {

	const problem::tsp* prob;
	//check if problem is of type pagmo::problem::tsp
	try
	{
		const problem::tsp& tsp_prob = dynamic_cast<const problem::tsp &>(pop.problem());
		prob = &tsp_prob;
	}
	catch (const std::bad_cast& e)
	{
		pagmo_throw(value_error,"Problem not of type pagmo::problem::tsp");
	}
	
	// Let's store some useful variables.

	const population::size_type NP = pop.size();
	const std::vector<std::vector<double> >& weights = prob->get_weights();
	const problem::base::size_type Nv = prob->get_n_cities();

	// Get out if there is nothing to do.
	if (m_gen == 0) {
		return;
	}
	
	// Initializing the random number generators
	boost::uniform_real<double> uniform(0.0,1.0);
	boost::variate_generator<boost::lagged_fibonacci607 &, boost::uniform_real<double> > unif_01(m_drng,uniform);
	boost::uniform_int<int> NPless1(0, NP - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_NPless1(m_urng,NPless1);
	boost::uniform_int<int> Nv_(0, Nv - 1);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nv(m_urng,Nv_);
	boost::uniform_int<int> Nvless1(0, Nv - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nvless1(m_urng,Nvless1);


	//check if we have a symmetric problem (symmetric weight matrix)
	bool is_sym = true;
	for(size_t i = 0; i < Nv; i++)
	{
		for(size_t j = i+1; j < Nv; j++)
		{
			if(weights[i][j] != weights[j][i])
			{
				is_sym = false;
				goto end_loop;
			}
		}
	}
	end_loop:	
	
	//create own local population
	std::vector<decision_vector> my_pop(NP, decision_vector(Nv));

	//check if some individuals in the population that is passed as a function input are feasible.
	bool feasible;
	std::vector<int> not_feasible;
	for (size_t i = 0; i < NP; i++) {
		feasible = prob->feasibility_x(pop.get_individual(i).cur_x);
		if(feasible){ //if feasible store it in my_pop
			switch( prob->get_encoding() ) {
			    case problem::tsp::FULL:
			        my_pop[i] = prob->full2cities(pop.get_individual(i).cur_x);
			        break;
			    case problem::tsp::RANDOMKEYS:
			        my_pop[i] = prob->randomkeys2cities(pop.get_individual(i).cur_x);
			        break;
			    case problem::tsp::CITIES:
			        my_pop[i] = pop.get_individual(i).cur_x;
			        break;
			}
		}
		else
		{
			not_feasible.push_back(i);
		}
	}

	//replace the not feasible individuals by feasible ones	
	int i;		
	switch (m_ini_type){
		case 0:
		{
		//random initialization (produces feasible individuals)
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				for (size_t j = 0; j < Nv; j++) {
					my_pop[i][j] = j;
				}
			}
			int tmp;
			size_t rnd_idx;
			for (size_t j = 1; j < Nv-1; j++) {
				boost::uniform_int<int> dist_(j, Nv - 1);
				boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > dist(m_urng,dist_);
					
				for (size_t ii = 0; ii < not_feasible.size(); ii++) {
					i = not_feasible[ii];
					rnd_idx = dist();
					tmp = my_pop[i][j];
					my_pop[i][j] = my_pop[i][rnd_idx];
					my_pop[i][rnd_idx] = tmp;
				}	

			}
			break;
		}
		case 1:
		{
		//initialize with nearest neighbor algorithm
			int nxt_city;
			size_t min_idx;
			std::vector<int> not_visited(Nv);
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				for (size_t j = 0; j < Nv; j++) {
					not_visited[j] = j;
				}
				my_pop[i][0] = unif_Nv();
				std::swap(not_visited[my_pop[i][0]],not_visited[Nv-1]);
				for (size_t j = 1; j < Nv-1; j++) {
					min_idx = 0;
					nxt_city = not_visited[0];
					for (size_t l = 1; l < Nv-j; l++) {
						if(weights[my_pop[i][j-1]][not_visited[l]] < weights[my_pop[i][j-1]][nxt_city]) {
							min_idx = l;
							nxt_city = not_visited[l];
						}
					}
					my_pop[i][j] = nxt_city;
					std::swap(not_visited[min_idx],not_visited[Nv-j-1]);
				}
				my_pop[i][Nv-1] = not_visited[0];
			}
			break;
		}
		default:
			pagmo_throw(value_error,"Invalid initialization type");
	}

	//compute fitness of individuals (necessary if weight matrix is not symmetric)
	std::vector<double>  fitness(NP, 0);
	if(!is_sym){
		for(size_t i=0; i < NP; i++){
			fitness[i] = weights[my_pop[i][Nv-1]][my_pop[i][0]];
			for(size_t k=1; k < Nv; k++){
				fitness[i] += weights[my_pop[i][k-1]][my_pop[i][k]];
			}
		}
	}	
	
	decision_vector tmp_tour(Nv);
	bool stop;
	size_t rnd_num, i2, pos1_c1, pos1_c2, pos2_c1, pos2_c2; //pos2_c1 denotes the position of city1 in parent2
	double fitness_change, fitness_tmp = 0;

	//InverOver main loop
	for(int iter = 0; iter < m_gen; iter++){
		for(size_t i1 = 0; i1 < NP; i1++){
			fitness_change = 0;
			tmp_tour = my_pop[i1];
			pos1_c1 = unif_Nv();
			stop = false;
			while(!stop){
				if(unif_01() < m_ri){
					rnd_num = unif_Nvless1();
					pos1_c2 = (rnd_num == pos1_c1? Nv-1:rnd_num);
				}
				else{
					i2 = unif_NPless1();
					i2 = (i2 == i1? NP-1:i2);
					pos2_c1 = std::find(my_pop[i2].begin(),my_pop[i2].end(),tmp_tour[pos1_c1])-my_pop[i2].begin();
					pos2_c2 = (pos2_c1 == Nv-1? 0:pos2_c1+1);
					pos1_c2 = std::find(tmp_tour.begin(),tmp_tour.end(),my_pop[i2][pos2_c2])-tmp_tour.begin();
				}
				stop = (abs(pos1_c1-pos1_c2)==1 || abs(pos1_c1-pos1_c2)==Nv-1);
				if(!stop){
					
					if(pos1_c1<pos1_c2){
						for(size_t l=0; l < (double (pos1_c2-pos1_c1-1)/2); l++){
							std::swap(tmp_tour[pos1_c1+1+l],tmp_tour[pos1_c2-l]);
						}
						if(is_sym){
							fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
							fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
						}
					}
					else{
						//inverts the section from c1 to c2 (see documentation Note3)
						
						for(size_t l=0; l < (double (Nv-(pos1_c1-pos1_c2)-1)/2); l++){
							std::swap(tmp_tour[pos1_c1+1+l - (pos1_c1+1+l>Nv-1? Nv:0)],tmp_tour[pos1_c2-l + (pos1_c2<l? Nv:0)]);
						}
						if(is_sym){
							fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]][tmp_tour[pos1_c2+1]];
							fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1]];
						}
						
					}
					pos1_c1 = pos1_c2; //better performance than original Inver-Over (shorter tour in less time)
				}
			} //end of while loop (looping over a single individual)
			if(!is_sym){ //compute fitness of the temporary tour
				fitness_tmp = weights[tmp_tour[Nv-1]][tmp_tour[0]];
				for(size_t k=1; k < Nv; k++){
					fitness_tmp += weights[tmp_tour[k-1]][tmp_tour[k]];
				}
				fitness_change = fitness_tmp - fitness[i1]; 
			}
	
			if(fitness_change < 0){ //replace individual?
				my_pop[i1] = tmp_tour;
				if(!is_sym){
					fitness[i1] = fitness_tmp;
				}
			}
			
			
		} //end of loop over population
	} //end of loop over generations


	//change representation of tour
    	for (size_t ii = 0; ii < NP; ii++) {
			switch( prob->get_encoding() ) {
			    case problem::tsp::FULL:
			        pop.set_x(ii,prob->cities2full(my_pop[ii]));
			        break;
			    case problem::tsp::RANDOMKEYS:
			        pop.set_x(ii,prob->cities2randomkeys(my_pop[ii],pop.get_individual(ii).cur_x));
			        break;
			    case problem::tsp::CITIES:
			        pop.set_x(ii,my_pop[ii]);
			        break;
			}
		}

    } // end of evolve
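In the symmetric case the snippet above updates the tour length incrementally, because reversing a segment only replaces two edges of the tour. A standalone sketch of that delta (not PaGMO code), computed here on the tour before the reversal rather than with the post-swap bookkeeping used above:

#include <cstddef>
#include <vector>

// Length change caused by reversing the segment at positions p1+1 .. p2 of a cyclic tour,
// for a symmetric weight matrix w; assumes p1 < p2 < tour.size().
double reversal_delta(const std::vector<std::vector<double> > &w, const std::vector<int> &tour,
	std::size_t p1, std::size_t p2)
{
	const std::size_t n = tour.size();
	const std::size_t after_p2 = (p2 + 1) % n;
	// Edges removed: (tour[p1], tour[p1+1]) and (tour[p2], tour[p2+1]).
	const double removed = w[tour[p1]][tour[p1 + 1]] + w[tour[p2]][tour[after_p2]];
	// Edges added:   (tour[p1], tour[p2])   and (tour[p1+1], tour[p2+1]).
	const double added = w[tour[p1]][tour[p2]] + w[tour[p1 + 1]][tour[after_p2]];
	return added - removed;
}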
Example #12
// Selection implementation.
std::vector<std::pair<population::size_type,std::vector<population::individual_type>::size_type> >
hv_fair_r_policy::select(const std::vector<population::individual_type> &immigrants, const population &dest) const
{
    // Fall back to fair_r_policy when facing a single-objective problem.
    if (dest.problem().get_f_dimension() == 1) {
        return fair_r_policy(m_rate, m_type).select(immigrants, dest);
    }

    std::vector<population::individual_type> filtered_immigrants;
    filtered_immigrants.reserve(immigrants.size());

    // Keeps information on the original indexing of immigrants after we filter out the duplicates
    std::vector<unsigned int> original_immigrant_indices;
    original_immigrant_indices.reserve(immigrants.size());

    // Remove the duplicates from the set of immigrants
    std::vector<population::individual_type>::const_iterator im_it = immigrants.begin();
    unsigned int im_idx = 0;
    for( ; im_it != immigrants.end() ; ++im_it) {
        decision_vector im_x((*im_it).cur_x);

        bool equal = true;
        for ( unsigned int idx = 0 ; idx < dest.size() ; ++idx ) {
            decision_vector isl_x(dest.get_individual(idx).cur_x);
            equal = true;
            for (unsigned int d_idx = 0 ; d_idx < im_x.size() ; ++d_idx) {
                if (im_x[d_idx] != isl_x[d_idx]) {
                    equal = false;
                    break;
                }
            }
            if (equal) {
                break;
            }
        }
        if (!equal) {
            filtered_immigrants.push_back(*im_it);
            original_immigrant_indices.push_back(im_idx);
        }
        ++im_idx;
    }

    // Computes the number of immigrants to be selected (accounting for the destination pop size)
    const population::size_type rate_limit = std::min<population::size_type>(get_n_individuals(dest), boost::numeric_cast<population::size_type>(filtered_immigrants.size()));

    // Defines the retvalue
    std::vector<std::pair<population::size_type, std::vector<population::individual_type>::size_type> > result;

    // Skip the remaining computation if there's nothing to do
    if (rate_limit == 0) {
        return result;
    }

    // Makes a copy of the destination population
    population pop_copy(dest);

    // Merge the immigrants to the copy of the destination population
    for (population::size_type i  = 0; i < rate_limit; ++i) {
        pop_copy.push_back(filtered_immigrants[i].cur_x);
    }

    // Population fronts stored as indices of individuals.
    std::vector< std::vector<population::size_type> > fronts_i = pop_copy.compute_pareto_fronts();

    // Population fronts stored as fitness vectors of individuals.
    std::vector< std::vector<fitness_vector> > fronts_f (fronts_i.size());

    // Nadir point is established manually later, first point is a first "safe" candidate.
    fitness_vector refpoint(pop_copy.get_individual(0).cur_f);

    // Fill fronts_f with fitness vectors and establish the nadir point
    for (unsigned int f_idx = 0 ; f_idx < fronts_i.size() ; ++f_idx) {
        fronts_f[f_idx].resize(fronts_i[f_idx].size());
        for (unsigned int p_idx = 0 ; p_idx < fronts_i[f_idx].size() ; ++p_idx) {
            fronts_f[f_idx][p_idx] = fitness_vector(pop_copy.get_individual(fronts_i[f_idx][p_idx]).cur_f);

            // Update the nadir point manually for efficiency.
            for (unsigned int d_idx = 0 ; d_idx < fronts_f[f_idx][p_idx].size() ; ++d_idx) {
                refpoint[d_idx] = std::max(refpoint[d_idx], fronts_f[f_idx][p_idx][d_idx]);
            }
        }
    }

    // Epsilon is added to nadir point
    for (unsigned int d_idx = 0 ; d_idx < refpoint.size() ; ++d_idx) {
        refpoint[d_idx] += m_nadir_eps;
    }

    // Vector for maintaining the original indices of points for augmented population as 0 and 1
    std::vector<unsigned int> g_orig_indices(pop_copy.size(), 1);

    unsigned int no_discarded_immigrants = 0;

    // Store which front we process (start with the last front) and the number of processed individuals.
    unsigned int front_idx = fronts_i.size(); // front_idx is equal to the size, since it's decremented right in the main loop
    unsigned int processed_individuals = 0;

    // Pairs of (islander index, islander exclusive hypervolume)
    // Second item is updated later
    std::vector<std::pair<unsigned int, double> > discarded_islanders;

    std::vector<std::pair<unsigned int, double> > point_pairs;
    // index of currently processed point in the point_pair vector.
    // Initiated to its size (=0) in order to enforce the initial computation on penultimate front.
    unsigned int current_point = point_pairs.size();

    // Stops when we reduce the augmented population to the size of the original population or when the number of discarded islanders reaches the limit
    while (processed_individuals < filtered_immigrants.size() && discarded_islanders.size() < rate_limit) {

        // if current front was exhausted, load next one
        if (current_point == point_pairs.size()) {
            --front_idx;

            // Compute contributions
            std::vector<double> c;

            // If there exists a dominated front below the front at index front_idx
            if (front_idx + 1 < fronts_f.size()) {
                std::vector<fitness_vector> merged_front;
                // Reserve the memory and copy the fronts
                merged_front.reserve(fronts_f[front_idx].size() + fronts_f[front_idx + 1].size());

                copy(fronts_f[front_idx].begin(), fronts_f[front_idx].end(), back_inserter(merged_front));
                copy(fronts_f[front_idx + 1].begin(), fronts_f[front_idx +1].end(), back_inserter(merged_front));

                hypervolume hv(merged_front, false);
                c = hv.contributions(refpoint);
            } else {
                hypervolume hv(fronts_f[front_idx], false);
                c = hv.contributions(refpoint);
            }

            // Initiate the pairs and sort by second item (exclusive volume)
            point_pairs.resize(fronts_f[front_idx].size());
            for(unsigned int i = 0 ; i < fronts_f[front_idx].size() ; ++i) {
                point_pairs[i] = std::make_pair(i, c[i]);
            }
            current_point = 0;
            std::sort(point_pairs.begin(), point_pairs.end(), sort_point_pairs_asc);
        }

        unsigned int orig_lc_idx = fronts_i[front_idx][point_pairs[current_point].first];

        if (orig_lc_idx < dest.size()) {
            discarded_islanders.push_back(std::make_pair(orig_lc_idx, 0.0));
        } else {
            ++no_discarded_immigrants;
        }

        // Flag given individual as discarded
        g_orig_indices[orig_lc_idx] = 0;

        ++processed_individuals;
        ++current_point;
    }

    // Number of non-discarded immigrants
    unsigned int no_available_immigrants = boost::numeric_cast<unsigned int>(filtered_immigrants.size() - no_discarded_immigrants);

    // Pairs of (immigrant index, immigrant exclusive hypervolume)
    // Second item is updated later
    std::vector<std::pair<unsigned int, double> > available_immigrants;
    available_immigrants.reserve(no_available_immigrants);
    for(unsigned int idx = dest.size() ; idx < pop_copy.size() ; ++idx) {
        // If the immigrant was not discarded add it to the available set
        if ( g_orig_indices[idx] == 1 ) {
            available_immigrants.push_back(std::make_pair(idx, 0.0));
        }
    }

    // Aggregate all points to establish the hypervolume contribution of available immigrants and discarded islanders
    std::vector<fitness_vector> merged_fronts;
    merged_fronts.reserve(pop_copy.size());

    for(unsigned int idx = 0 ; idx < pop_copy.size() ; ++idx) {
        merged_fronts.push_back(pop_copy.get_individual(idx).cur_f);
    }

    hypervolume hv(merged_fronts, false);
    std::vector<std::pair<unsigned int, double> >::iterator it;

    for(it = available_immigrants.begin() ; it != available_immigrants.end() ; ++it) {
        (*it).second = hv.exclusive((*it).first, refpoint);
    }

    for(it = discarded_islanders.begin() ; it != discarded_islanders.end() ; ++it) {
        (*it).second = hv.exclusive((*it).first, refpoint);
    }

    // Sort islanders and immigrants according to exclusive hypervolume
    sort(available_immigrants.begin(), available_immigrants.end(), hv_fair_r_policy::ind_cmp);
    sort(discarded_islanders.begin(), discarded_islanders.end(), hv_fair_r_policy::ind_cmp);

    // Number of exchanges is the minimum of the number of non discarded immigrants and the number of discarded islanders
    unsigned int no_exchanges = std::min(boost::numeric_cast<unsigned int>(available_immigrants.size()), boost::numeric_cast<unsigned int>(discarded_islanders.size()));

    it = available_immigrants.begin();
    std::vector<std::pair<unsigned int, double> >::reverse_iterator r_it = discarded_islanders.rbegin();

    // Match the best immigrant (forward iterator) with the worst islander (reverse iterator) no_exchanges times.
    for(unsigned int i = 0 ; i < no_exchanges ; ++i) {
        // Break if any islander is better than an immigrant
        if ((*r_it).second > (*it).second) {
            break;
        }
        // Push the pair (islander_idx, fixed_immigrant_idx) to the results
        result.push_back(std::make_pair((*r_it).first, original_immigrant_indices[(*it).first - dest.size()]));
        ++r_it;
        ++it;
    }

    return result;
}
Example #13
void bee_colony::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type prob_i_dimension = prob.get_i_dimension(), D = prob.get_dimension(), Dc = D - prob_i_dimension, prob_c_dimension = prob.get_c_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();

	//We perform some checks to determine whether the problem/population are suitable for ABC
	if ( Dc == 0 ) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for ABC to optimise");
	}

	if ( prob.get_f_dimension() != 1 ) {
		pagmo_throw(value_error,"The problem is not single objective and ABC is not suitable to solve it");
	}

	if ( prob_c_dimension != 0 ) {
		pagmo_throw(value_error,"The problem is not box constrained and ABC is not suitable to solve it");
	}

	if (NP < 2) {
		pagmo_throw(value_error,"for ABC at least 2 individuals in the population are needed");
	}

	// Get out if there is nothing to do.
	if (m_iter == 0) {
		return;
	}

	// Some vectors used during evolution are allocated here.
	fitness_vector fnew(prob.get_f_dimension());
	decision_vector dummy(D,0);			//used for initialisation purposes
	std::vector<decision_vector > X(NP,dummy);	//set of food sources
	std::vector<fitness_vector> fit(NP);		//food sources fitness

	decision_vector temp_solution(D,0);

	std::vector<int> trial(NP,0);

	std::vector<double> probability(NP);

	population::size_type neighbour = 0;

	decision_vector::size_type param2change = 0;

	std::vector<double> selectionfitness(NP), cumsum(NP), cumsumTemp(NP);
	std::vector <population::size_type> selection(NP);


	double r = 0;

	// Copy the food sources position and their fitness
	for ( population::size_type i = 0; i<NP; i++ ) {
		X[i]	=	pop.get_individual(i).cur_x;
		fit[i]	=	pop.get_individual(i).cur_f;
	}

	// Main ABC loop
	for (int j = 0; j < m_iter; ++j) {
		//1- Send employed bees
		for (population::size_type ii = 0; ii< NP; ++ii) {
			//selects a random component (only of the continuous part) of the decision vector
			param2change = boost::uniform_int<decision_vector::size_type>(0,Dc-1)(m_urng);
			//randomly choose a solution to be used to produce a mutant solution of solution ii
			//randomly selected solution must be different from ii
			do{
				neighbour = boost::uniform_int<population::size_type>(0,NP-1)(m_urng);
			}
			while(neighbour == ii);

			//copy local solution into temp_solution (the whole decision_vector, also the integer part)
			for(population::size_type i=0; i<D; ++i) {
				temp_solution[i] = X[ii][i];
			}

			//mutate temp_solution
			temp_solution[param2change] = X[ii][param2change] + boost::uniform_real<double>(-1,1)(m_drng) * (X[ii][param2change] - X[neighbour][param2change]);

			//if the generated parameter value is out of bounds, it is shifted onto the boundary
			if (temp_solution[param2change]<lb[param2change]) {
				temp_solution[param2change] = lb[param2change];
			}
			if (temp_solution[param2change]>ub[param2change]) {
				temp_solution[param2change] = ub[param2change];
			}

			//Calling void prob.objfun(fitness_vector,decision_vector) is more efficient as no memory allocation occurs
			//A call to fitness_vector prob.objfun(decision_vector) allocates memory for the return value.
			prob.objfun(fnew,temp_solution);
			//If the new solution is better than the old one replace it with the mutant one and reset its trial counter
			if(prob.compare_fitness(fnew, fit[ii])) {
				X[ii][param2change] = temp_solution[param2change];
				pop.set_x(ii,X[ii]);
				prob.objfun(fit[ii], X[ii]); //update the fitness vector
				trial[ii] = 0;
			}
			else {
				trial[ii]++; //if the solution can't be improved increase its trial counter
			}
		} //End of loop on the population members

		//2 - Send onlooker bees
		//We scale all fitness values from 0 (worst) to the absolute value of the best fitness
		fitness_vector worstfit=fit[0];
		for (pagmo::population::size_type i = 1; i < NP;i++) {
			if (prob.compare_fitness(worstfit,fit[i])) worstfit=fit[i];
		}

		for (pagmo::population::size_type i = 0; i < NP; i++) {
			selectionfitness[i] = fabs(worstfit[0] - fit[i][0]) + 1.;
		}

		// We build and normalise the cumulative sum
		cumsumTemp[0] = selectionfitness[0];
		for (pagmo::population::size_type i = 1; i< NP; i++) {
			cumsumTemp[i] = cumsumTemp[i - 1] + selectionfitness[i];
		}
		for (pagmo::population::size_type i = 0; i < NP; i++) {
			cumsum[i] = cumsumTemp[i]/cumsumTemp[NP-1];
		}

		for (pagmo::population::size_type i = 0; i < NP; i++) {
			r = m_drng();
			for (pagmo::population::size_type j = 0; j < NP; j++) {
				if (cumsum[j] > r) {
					selection[i]=j;
					break;
				}
			}
		}

		for(pagmo::population::size_type t = 0; t < NP; ++t) {
			r = m_drng();
			pagmo::population::size_type ii = selection[t];
			//selects a random component (only of the continuous part) of the decision vector
			param2change = boost::uniform_int<decision_vector::size_type>(0,Dc-1)(m_urng);
			//randomly choose a solution to be used to produce a mutant solution of solution ii
			//randomly selected solution must be different from ii
			do{
				neighbour = boost::uniform_int<population::size_type>(0,NP-1)(m_urng);
			}
			while(neighbour == ii);

			//copy local solution into temp_solution (also integer part)
			for(population::size_type i=0; i<D; ++i) {
				temp_solution[i] = X[ii][i];
			}

			//mutate temp_solution
			temp_solution[param2change] = X[ii][param2change] + boost::uniform_real<double>(-1,1)(m_drng) * (X[ii][param2change] - X[neighbour][param2change]);

			/*if generated parameter value is out of boundaries, it is shifted onto the boundaries*/
			if (temp_solution[param2change]<lb[param2change]) {
				temp_solution[param2change] = lb[param2change];
			}
			if (temp_solution[param2change]>ub[param2change]) {
				temp_solution[param2change] = ub[param2change];
			}

			//Calling void prob.objfun(fitness_vector,decision_vector) is more efficient as no memory allocation occurs
			//A call to fitness_vector prob.objfun(decision_vector) allocates memory for the return value.
			prob.objfun(fnew,temp_solution);
			//If the new solution is better than the old one replace it with the mutant one and reset its trial counter
			if(prob.compare_fitness(fnew, fit[ii])) {
				X[ii][param2change] = temp_solution[param2change];
				pop.set_x(ii,X[ii]);
				prob.objfun(fit[ii], X[ii]); //update the fitness vector
				trial[ii] = 0;
			}
			else {
				trial[ii]++; //if the solution can't be improved increase its trial counter
			}
		}

		//3 - Send scout bees
		int maxtrialindex = 0;
		for (population::size_type ii=1; ii<NP; ++ii)
		{
			if (trial[ii] > trial[maxtrialindex]) {
				maxtrialindex = ii;
			}
		}
		if(trial[maxtrialindex] >= m_limit)
		{
			//select a new random solution
			for(problem::base::size_type jj = 0; jj < Dc; ++jj) {
				X[maxtrialindex][jj] = boost::uniform_real<double>(lb[jj],ub[jj])(m_drng);
			}
			trial[maxtrialindex] = 0;
			pop.set_x(maxtrialindex,X[maxtrialindex]);
		}




	} // end of main ABC loop

}
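The onlooker-bee step above is a fitness-proportional (roulette-wheel) selection over a normalised cumulative sum. A standalone sketch of that selection (not PaGMO code), assuming non-negative weights with a positive total:

#include <cstddef>
#include <random>
#include <vector>

std::size_t roulette_pick(const std::vector<double> &weights, std::mt19937 &rng)
{
	// Build the cumulative sum of the selection weights.
	std::vector<double> cumsum(weights.size());
	double total = 0.0;
	for (std::size_t i = 0; i < weights.size(); ++i) {
		total += weights[i];
		cumsum[i] = total;
	}
	// Draw r uniformly in [0, total) and return the first index whose cumulative sum exceeds it.
	const double r = std::uniform_real_distribution<double>(0.0, total)(rng);
	for (std::size_t i = 0; i < cumsum.size(); ++i) {
		if (cumsum[i] > r) {
			return i;
		}
	}
	return cumsum.size() - 1; // numerical edge case
}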
Example #14
/**
 * Run the CORE algorithm
 *
 * @param[in,out] pop input/output pagmo::population to be evolved.
 */
void cstrs_core::evolve(population &pop) const
{	
	// store useful variables
	const problem::base &prob = pop.problem();
	const population::size_type pop_size = pop.size();
	const problem::base::size_type prob_dimension = prob.get_dimension();

	// get the constraints dimension
	problem::base::c_size_type prob_c_dimension = prob.get_c_dimension();

	//We perform some checks to determine whether the problem/population are suitable for CORE
	if(prob_c_dimension < 1) {
		pagmo_throw(value_error,"The problem is not constrained and CORE is not suitable to solve it");
	}
	if(prob.get_f_dimension() != 1) {
		pagmo_throw(value_error,"The problem is multiobjective and CORE is not suitable to solve it");
	}

	// Get out if there is nothing to do.
	if(pop_size == 0) {
		return;
	}

	// generates the unconstrained problem
	problem::con2uncon prob_unconstrained(prob);

	// associates the population to this problem
	population pop_uncon(prob_unconstrained);

	// fill this unconstrained population
	pop_uncon.clear();
	for(population::size_type i=0; i<pop_size; i++) {
		pop_uncon.push_back(pop.get_individual(i).cur_x);
	}

	// vector containing the infeasibles positions
	std::vector<population::size_type> pop_infeasibles;

	// Main CORE loop
	for(int k=0; k<m_gen; k++) {

		if(k%m_repair_frequency == 0) {
			pop_infeasibles.clear();

			// get the infeasible individuals
			for(population::size_type i=0; i<pop_size; i++) {
				if(!prob.feasibility_c(pop.get_individual(i).cur_c)) {
					pop_infeasibles.push_back(i);
				}
			}

			// random shuffle of infeasibles?
			population::size_type number_of_repair = (population::size_type)(m_repair_ratio * pop_infeasibles.size());

			// repair the infeasible individuals
			for(population::size_type i=0; i<number_of_repair; i++) {
				const population::size_type &current_individual_idx = pop_infeasibles.at(i);

				pop.repair(current_individual_idx, m_repair_algo);
			}

			// the population is repaired, it can be now used in the new unconstrained population
			// only the repaired individuals are put back in the population
			for(population::size_type i=0; i<number_of_repair; i++) {
				population::size_type current_individual_idx = pop_infeasibles.at(i);
				pop_uncon.set_x(current_individual_idx, pop.get_individual(current_individual_idx).cur_x);
			}
		}

		m_original_algo->evolve(pop_uncon);

		// push back the population in the main problem
		pop.clear();
		for(population::size_type i=0; i<pop_size; i++) {
			pop.push_back(pop_uncon.get_individual(i).cur_x);
		}

		// Check the exit conditions (every 40 generations, just as DE)
		if(k % 40 == 0) {
			decision_vector tmp(prob_dimension);

			double dx = 0;
			for(decision_vector::size_type i=0; i<prob_dimension; i++) {
				tmp[i] = pop.get_individual(pop.get_worst_idx()).best_x[i] - pop.get_individual(pop.get_best_idx()).best_x[i];
				dx += std::fabs(tmp[i]);
			}

			if(dx < m_xtol ) {
				if (m_screen_output) {
					std::cout << "Exit condition -- xtol < " << m_xtol << std::endl;
				}
				break;
			}

			double mah = std::fabs(pop.get_individual(pop.get_worst_idx()).best_f[0] - pop.get_individual(pop.get_best_idx()).best_f[0]);

			if(mah < m_ftol) {
				if(m_screen_output) {
					std::cout << "Exit condition -- ftol < " << m_ftol << std::endl;
				}
				break;
			}

			// outputs current values
			if(m_screen_output) {
				std::cout << "Generation " << k << " ***" << std::endl;
				std::cout << "    Best global fitness: " << pop.champion().f << std::endl;
				std::cout << "    xtol: " << dx << ", ftol: " << mah << std::endl;
			}
		}
	}
}
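The exit test performed every 40 generations above reduces to two scalar checks: the L1 distance between the best and the worst individual's best_x (xtol) and the gap between their best_f values (ftol). A small self-contained sketch of that check follows (plain C++ with made-up numbers, not the PaGMO API).

// Minimal sketch of the exit test: xtol is the L1 distance between the best
// and worst decision vectors, ftol the gap between their fitness values.
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
	const std::vector<double> best_x  = {0.10, 0.20, 0.30};
	const std::vector<double> worst_x = {0.11, 0.19, 0.30};
	const double best_f = 1.000, worst_f = 1.002;
	const double xtol = 1e-1, ftol = 1e-2;

	double dx = 0.;
	for (std::size_t i = 0; i < best_x.size(); ++i) {
		dx += std::fabs(worst_x[i] - best_x[i]);
	}
	const double df = std::fabs(worst_f - best_f);

	if (dx < xtol)      std::cout << "Exit condition -- xtol < " << xtol << std::endl;
	else if (df < ftol) std::cout << "Exit condition -- ftol < " << ftol << std::endl;
	else                std::cout << "Keep evolving" << std::endl;
	return 0;
}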
Example #15
0
void cs::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	const problem::base::size_type Dc = D - prob_i_dimension;


	//We perform some checks to determine whether the problem/population are suitable for compass search
	if ( Dc == 0 ) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for compass search to optimise");
	}

	if ( prob_c_dimension != 0 ) {
		pagmo_throw(value_error,"The problem is not box constrained and compass search is not suitable to solve it");
	}

	if ( prob_f_dimension != 1 ) {
		pagmo_throw(value_error,"The problem is not single objective and compass search is not suitable to solve it");
	}

	// Get out if there is nothing to do.
	if (NP == 0 || m_max_eval == 0) {
		return;
	}

	//Starting point is the best individual
	const int bestidx = pop.get_best_idx();
	const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
	const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;

	decision_vector x=x0,newx;
	fitness_vector f=fit0,newf=fit0;
	bool flag = false;
	int eval=0;

	double newrange=m_start_range;

	while (newrange > m_stop_range && eval <= m_max_eval) {
		flag = false;
		for (unsigned int i=0; i<Dc; i++) {
			newx=x;

			//move up
			newx[i] = x[i] + newrange * (ub[i]-lb[i]);
			//feasibility correction
			if (newx[i] > ub [i]) newx[i]=ub[i];

			prob.objfun(newf,newx); eval++;
			if (prob.compare_fitness(newf,f)) {
				f = newf;
				x = newx;
				flag=true;
				break; //accept
			}

			//move down
			newx[i] = x[i] - newrange * (ub[i]-lb[i]);
			//feasibility correction
			if (newx[i] < lb [i]) newx[i]=lb[i];

			prob.objfun(newf,newx); eval++;
			if (prob.compare_fitness(newf,f)) {  //accept
				f = newf;
				x = newx;
				flag=true;
				break;
			}
		}
		if (!flag) {
			newrange *= m_reduction_coeff;
		}
	} //end while
	std::transform(x.begin(), x.end(), pop.get_individual(bestidx).cur_x.begin(), newx.begin(),std::minus<double>()); // newx is now velocity
	pop.set_x(bestidx,x); //new evaluation is possible here......
	pop.set_v(bestidx,newx);
}
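Stripped of the population bookkeeping, compass search is a pattern search: probe each coordinate up and down by a step proportional to the box width, accept the first improvement, and shrink the step when a full sweep fails. A standalone sketch on a 2-D quadratic follows (plain C++11, not the PaGMO API; bounds, ranges and the reduction factor are illustrative, and the m_max_eval cap is omitted for brevity).

// Standalone sketch of the compass-search loop on a simple 2-D quadratic.
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <vector>

static double objfun(const std::vector<double> &x)
{
	return (x[0] - 0.3) * (x[0] - 0.3) + (x[1] + 0.1) * (x[1] + 0.1);
}

int main()
{
	std::vector<double> lb = {-1., -1.}, ub = {1., 1.}, x = {0.9, 0.9};
	double f = objfun(x);
	double range = 0.1;                       // start range
	const double stop = 1e-4, shrink = 0.5;   // stop range, reduction coefficient

	while (range > stop) {
		bool improved = false;
		for (std::size_t i = 0; i < x.size(); ++i) {
			for (double dir : {+1., -1.}) {   // move up, then down
				std::vector<double> nx = x;
				nx[i] = std::min(ub[i], std::max(lb[i], x[i] + dir * range * (ub[i] - lb[i])));
				const double nf = objfun(nx);
				if (nf < f) { x = nx; f = nf; improved = true; break; }
			}
			if (improved) break;              // accept and restart the sweep
		}
		if (!improved) range *= shrink;       // no move accepted: shrink the pattern
	}
	std::cout << "x = (" << x[0] << ", " << x[1] << "), f = " << f << std::endl;
	return 0;
}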
Example #16
0
/**
 * Updates the penalty coefficients with the given population.
 * @param[in] pop pagmo::population whose individuals are used to update the penalty coefficients.
 */
void cstrs_self_adaptive::update_penalty_coeff(const population &pop)
{
	if(*m_original_problem != pop.problem()) {
		pagmo_throw(value_error,"The problem linked to the population is not the same as the problem given in argument.");
	}

	// Let's store some useful variables.
	const population::size_type pop_size = pop.size();

	// Get out if there is nothing to do.
	if (pop_size == 0) {
		return;
	}

	m_map_fitness.clear();
	m_map_constraint.clear();
	// store f and c in maps keyed on the hash of x
	for(population::size_type i=0; i<pop_size; i++) {
		const population::individual_type &current_individual = pop.get_individual(i);
		// m_map_fitness.insert(std::pair<std::size_t, fitness_vector>(m_decision_vector_hash(current_individual.cur_x),current_individual.cur_f));
		m_map_fitness[m_decision_vector_hash(current_individual.cur_x)]=current_individual.cur_f;
		m_map_constraint[m_decision_vector_hash(current_individual.cur_x)]=current_individual.cur_c;
	}

	std::vector<population::size_type> feasible_idx(0);
	std::vector<population::size_type> infeasible_idx(0);

	// store indexes of feasible and non feasible individuals
	for(population::size_type i=0; i<pop_size; i++) {
		const population::individual_type &current_individual = pop.get_individual(i);

		if(m_original_problem->feasibility_c(current_individual.cur_c)) {
			feasible_idx.push_back(i);
		} else {
			infeasible_idx.push_back(i);
		}
	}

	// if the population contains only feasible individuals, just update the constraint scaling and reset the penalty state
	if(infeasible_idx.size() == 0) {
		update_c_scaling(pop);		
		m_apply_penalty_1 = false;
		m_scaling_factor = 0.;
		return;
	}
	m_apply_penalty_1 = false;
	m_scaling_factor = 0.;

	// updates the c_scaling, needed for solution infeasibility computation
	update_c_scaling(pop);

	// evaluate the infeasibility of each solution
	std::vector<double> solution_infeasibility(pop_size, 0.);

	for(population::size_type i=0; i<pop_size; i++) {
		const population::individual_type &current_individual = pop.get_individual(i);

		// compute the infeasibility of the constraint
		solution_infeasibility[i] = compute_solution_infeasibility(current_individual.cur_c);
	}

	// search position of x_hat_down, x_hat_up and x_hat_round
	population::size_type hat_down_idx = -1;
	population::size_type hat_up_idx = -1;
	population::size_type hat_round_idx = -1;

	// first case, the population contains at least one feasible solution
	if(feasible_idx.size() > 0) {
		// initialize hat_down_idx
		hat_down_idx = feasible_idx.at(0);

		// x_hat_down = feasible individual with lowest objective value in p
		for(population::size_type i=0; i<feasible_idx.size(); i++) {
			const population::size_type current_idx = feasible_idx.at(i);
			const population::individual_type &current_individual = pop.get_individual(current_idx);

			if(m_original_problem->compare_fitness(current_individual.cur_f, pop.get_individual(hat_down_idx).cur_f)) {
				hat_down_idx = current_idx;
			}
		}

		// hat down is now available
		fitness_vector f_hat_down = pop.get_individual(hat_down_idx).cur_f;

		// the x_hat_up value depends on whether the population contains an infeasible
		// individual with an objective function value better than f_hat_down
		bool pop_contains_infeasible_f_better_x_hat_down = false;
		for(population::size_type i=0; i<infeasible_idx.size(); i++) {
			const population::size_type current_idx = infeasible_idx.at(i);
			const population::individual_type &current_individual = pop.get_individual(current_idx);

			if(m_original_problem->compare_fitness(current_individual.cur_f, f_hat_down)) {
				pop_contains_infeasible_f_better_x_hat_down = true;

				// initialize hat_up_idx
				hat_up_idx = current_idx;

				break;
			}
		}

		if(pop_contains_infeasible_f_better_x_hat_down) {
			// hat_up_idx is already initialized

			// gets the individual with maximum infeasibility and objfun lower than f_hat_down
			for(population::size_type i=0; i<infeasible_idx.size(); i++) {
				const population::size_type current_idx = infeasible_idx.at(i);
				const population::individual_type &current_individual = pop.get_individual(current_idx);

				if(m_original_problem->compare_fitness(current_individual.cur_f, f_hat_down) &&
					(solution_infeasibility.at(current_idx) >= solution_infeasibility.at(hat_up_idx)) ) {

					if(solution_infeasibility.at(current_idx) == solution_infeasibility.at(hat_up_idx)) {
						if(m_original_problem->compare_fitness(current_individual.cur_f, pop.get_individual(hat_up_idx).cur_f)) {
							hat_up_idx = current_idx;
						}
					} else {
						hat_up_idx = current_idx;
					}
				}
			}

			// apply penalty 1
			m_apply_penalty_1 = true;

		} else {
			// all the infeasible solutions have an objective function value greater than f_hat_down
			// the worst is the one that has the maximum infeasibility
			// initialize hat_up_idx
			hat_up_idx = infeasible_idx.at(0);

			for(population::size_type i=0; i<infeasible_idx.size(); i++) {
				const population::size_type current_idx = infeasible_idx.at(i);
				const population::individual_type &current_individual = pop.get_individual(current_idx);

				if(solution_infeasibility.at(current_idx) >= solution_infeasibility.at(hat_up_idx)) {
					if(solution_infeasibility.at(current_idx) == solution_infeasibility.at(hat_up_idx)) {
						if(m_original_problem->compare_fitness(pop.get_individual(hat_up_idx).cur_f, current_individual.cur_f)) {
							hat_up_idx = current_idx;
						}
					} else {
						hat_up_idx = current_idx;
					}
				}
			}

			// do not apply penalty 1
			m_apply_penalty_1 = false;
		}

	} else { // case where there is no feasible solution in the population
		// best is the individual with the lowest infeasibility
		hat_down_idx = 0;
		hat_up_idx = 0;

		for(population::size_type i=0; i<pop_size; i++) {
			const population::individual_type &current_individual = pop.get_individual(i);

			if(solution_infeasibility.at(i) <= solution_infeasibility.at(hat_down_idx)) {
				if(solution_infeasibility.at(i) == solution_infeasibility.at(hat_down_idx)) {
					if(m_original_problem->compare_fitness(current_individual.cur_f, pop.get_individual(hat_down_idx).cur_f)) {
						hat_down_idx = i;
					}
				} else {
					hat_down_idx = i;
				}
			}
		}

		// worst individual
		for(population::size_type i=0; i<pop_size; i++) {
			const population::individual_type &current_individual = pop.get_individual(i);

			if(solution_infeasibility.at(i) >= solution_infeasibility.at(hat_up_idx)) {
				if(solution_infeasibility.at(i) == solution_infeasibility.at(hat_up_idx)) {
					if(m_original_problem->compare_fitness(pop.get_individual(hat_up_idx).cur_f, current_individual.cur_f)) {
						hat_up_idx = i;
					}
				} else {
					hat_up_idx = i;
				}
			}
		}

		// apply penalty 1 to the population
		m_apply_penalty_1 = true;
	}

	// stores the hat round idx, i.e. the solution with highest objective
	// function value in the population
	hat_round_idx = 0;
	for(population::size_type i=0; i<pop_size; i++) {
		const population::individual_type &current_individual = pop.get_individual(i);

		if(m_original_problem->compare_fitness(pop.get_individual(hat_round_idx).cur_f, current_individual.cur_f)) {
			hat_round_idx = i;
		}
	}

	// get the objective function values of the three individuals
	m_f_hat_round = pop.get_individual(hat_round_idx).cur_f;
	m_f_hat_down =  pop.get_individual(hat_down_idx).cur_f;
	m_f_hat_up = pop.get_individual(hat_up_idx).cur_f;

	// get the solution infeasibility values of the three individuals
	m_i_hat_round = solution_infeasibility.at(hat_round_idx);
	m_i_hat_down = solution_infeasibility.at(hat_down_idx);
	m_i_hat_up = solution_infeasibility.at(hat_up_idx);

	// computes the scaling factor
	m_scaling_factor = 0.;
	// evaluates scaling factor
	if(m_original_problem->compare_fitness(m_f_hat_down, m_f_hat_up)) {
		m_scaling_factor = (m_f_hat_round[0] - m_f_hat_up[0]) / m_f_hat_up[0];
	} else {
		m_scaling_factor = (m_f_hat_round[0] - m_f_hat_down[0]) / m_f_hat_down[0];
	}
	if(m_f_hat_up[0] == m_f_hat_round[0]) {
		m_scaling_factor = 0.;
	}

}
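The scaling factor computed at the end of update_penalty_coeff() measures how far the worst objective value (f_hat_round) sits above the reference value (f_hat_up or f_hat_down, whichever is worse), relative to that reference. A toy numeric walk-through follows, assuming a single-objective minimisation problem so that compare_fitness(a, b) reduces to a < b; the fitness values are made up.

// Toy walk-through of the scaling-factor rule for a minimisation problem.
#include <iostream>

int main()
{
	const double f_hat_down  = 2.0;   // best feasible objective
	const double f_hat_up    = 5.0;   // selected infeasible objective
	const double f_hat_round = 9.0;   // worst objective in the population

	double scaling_factor = 0.;
	if (f_hat_down < f_hat_up) {
		scaling_factor = (f_hat_round - f_hat_up) / f_hat_up;       // (9 - 5) / 5 = 0.8
	} else {
		scaling_factor = (f_hat_round - f_hat_down) / f_hat_down;
	}
	if (f_hat_up == f_hat_round) {
		scaling_factor = 0.;          // degenerate case: no spread to scale against
	}
	std::cout << "scaling factor = " << scaling_factor << std::endl;
	return 0;
}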
Example #17
0
void snopt::evolve(population &pop) const
{
    // Let's store some useful variables.
    const problem::base &prob = pop.problem();
    const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(), prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
    const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
    const population::size_type NP = pop.size();
    const problem::base::size_type Dc = D - prob_i_dimension;
    const std::vector<double>::size_type D_ineqc = prob.get_ic_dimension();
    const std::vector<double>::size_type D_eqc = prob_c_dimension - D_ineqc;
    const std::string name = prob.get_name();

    //We perform some checks to determine whether the problem/population are suitable for SNOPT
    if ( prob_i_dimension != 0  ) {
        pagmo_throw(value_error,"No integer part allowed yet....");
    }

    if ( Dc == 0  ) {
        pagmo_throw(value_error,"No continuous part....");
    }

    if ( prob_f_dimension != 1 ) {
        pagmo_throw(value_error,"The problem is not single objective and SNOPT is not suitable to solve it");
    }

    // Get out if there is nothing to do.
    if (NP == 0 || m_major == 0) {
        return;
    }

    // We allocate memory for the decision vector that will be used in the snopt_function_
    di_comodo.x.resize(Dc);
    di_comodo.c.resize(prob_c_dimension);
    di_comodo.f.resize(prob_f_dimension);


    // We construct a SnoptProblem_PAGMO passing the pointers to the problem and the allocated
    //memory area for the di_comodo vector
    snoptProblem_PAGMO SnoptProblem(prob, &di_comodo);

    // Allocate and initialize;
    integer n     =  Dc;

    // Box-constrained non-linear optimization
    integer neF   =  1 + prob_c_dimension;

    //Memory sizing of A
    integer lenA  = Dc * (1 + prob_c_dimension); //overestimate
    integer *iAfun = new integer[lenA];
    integer *jAvar = new integer[lenA];
    doublereal *A  = new doublereal[lenA];


    //Memory sizing of G
    int lenG = Dc * (1 + prob_c_dimension); //overestimate
    integer *iGfun = new integer[lenG];
    integer *jGvar = new integer[lenG];



    //Decision vector memory allocation
    doublereal *x      = new doublereal[n];
    doublereal *xlow   = new doublereal[n];
    doublereal *xupp   = new doublereal[n];
    doublereal *xmul   = new doublereal[n];
    integer    *xstate = new    integer[n];

    //Objective function memory allocation
    doublereal *F      = new doublereal[neF];
    doublereal *Flow   = new doublereal[neF];
    doublereal *Fupp   = new doublereal[neF];
    doublereal *Fmul   = new doublereal[neF];
    integer    *Fstate = new integer[neF];

    integer nxnames = 1;
    integer nFnames = 1;
    char *xnames = new char[nxnames*8];
    char *Fnames = new char[nFnames*8];

    integer    ObjRow = 0;
    doublereal ObjAdd = 0;

    // Set the upper and lower bounds, and the initial guess
    int bestidx = pop.get_best_idx();
    for (pagmo::problem::base::size_type i = 0; i < Dc; i++) {
        xlow[i]   = lb[i];
        xupp[i]   = ub[i];
        xstate[i] =    0;
        x[i] = pop.get_individual(bestidx).cur_x[i];
    }

    // Set the bounds for objective, equality and inequality constraints
    // 1 - Objective function
    Flow[0] = -std::numeric_limits<double>::max();
    Fupp[0] = std::numeric_limits<double>::max();
    F[0] = pop.get_individual(bestidx).cur_f[0];
    // 2 - Equality constraints
    for (pagmo::problem::base::size_type i=0; i<D_eqc; ++i) {
        Flow[i+1] = 0;
        Fupp[i+1] = 0;
    }
    // 3 - Inequality constraints
    for (pagmo::problem::base::size_type i=0; i<D_ineqc; ++i) {
        Flow[i+1+D_eqc] = -std::numeric_limits<double>::max();
        Fupp[i+1+D_eqc] = 0;
    }

    // Load the data for SnoptProblem ...
    SnoptProblem.setProblemSize( n, neF );
    SnoptProblem.setNeG( lenG );
    SnoptProblem.setNeA( lenA );
    SnoptProblem.setA          ( lenA, iAfun, jAvar, A );
    SnoptProblem.setG          ( lenG, iGfun, jGvar );
    SnoptProblem.setObjective  ( ObjRow, ObjAdd );
    SnoptProblem.setX          ( x, xlow, xupp, xmul, xstate );
    SnoptProblem.setF          ( F, Flow, Fupp, Fmul, Fstate );
    SnoptProblem.setXNames     ( xnames, nxnames );
    SnoptProblem.setFNames     ( Fnames, nFnames );
    SnoptProblem.setProbName   ( name.c_str() ); //This is limited to be 8 characters!!!
    SnoptProblem.setUserFun    ( snopt_function_ );

    //We set some parameters
    if (m_screen_output) SnoptProblem.setIntParameter("Summary file",6);
    if (m_file_out)   SnoptProblem.setPrintFile   ( name.c_str() );
    SnoptProblem.setIntParameter ( "Derivative option", 0 );
    SnoptProblem.setIntParameter ( "Major iterations limit", m_major);
    SnoptProblem.setIntParameter ( "Iterations limit",100000);
    SnoptProblem.setRealParameter( "Major feasibility tolerance", m_feas);
    SnoptProblem.setRealParameter( "Major optimality tolerance", m_opt);


    //We set the sparsity structure
    int neG;
    try
    {
        std::vector<int> iGfun_vect, jGvar_vect;
        prob.set_sparsity(neG,iGfun_vect,jGvar_vect);
        for (int i=0; i < neG; i++)
        {
            iGfun[i] = iGfun_vect[i];
            jGvar[i] = jGvar_vect[i];
        }
        SnoptProblem.setNeG( neG );
        SnoptProblem.setNeA( 0 );
        SnoptProblem.setG( lenG, iGfun, jGvar );

    } //the user did implement the sparsity in the problem
    catch (const not_implemented_error &)
    {
        SnoptProblem.computeJac();
        neG = SnoptProblem.getNeG();
    } //the user did not implement the sparsity in the problem


    if (m_screen_output)
    {
        std::cout << "PaGMO 4 SNOPT:" << std::endl << std::endl;
        std::cout << "Sparsity pattern set, NeG = " << neG << std::endl;
        std::cout << "iGfun: [";
        for (int i=0; i<neG-1; ++i) std::cout << iGfun[i] << ",";
        std::cout << iGfun[neG-1] << "]" << std::endl;
        std::cout << "jGvar: [";
        for (int i=0; i<neG-1; ++i) std::cout << jGvar[i] << ",";
        std::cout << jGvar[neG-1] << "]" << std::endl;
    }

    integer Cold = 0;

    //HERE WE CALL snoptA routine!!!!!
    SnoptProblem.solve( Cold );

    //Save the final point making sure it is within the linear bounds
    std::copy(x,x+n,di_comodo.x.begin());
    decision_vector newx = di_comodo.x;
    std::transform(di_comodo.x.begin(), di_comodo.x.end(), pop.get_individual(bestidx).cur_x.begin(), di_comodo.x.begin(),std::minus<double>());
    for (integer i=0; i<n; i++)
    {
        newx[i] = std::min(std::max(lb[i],newx[i]),ub[i]);
    }

    pop.set_x(bestidx,newx);
    pop.set_v(bestidx,di_comodo.x);

    //Clean up memory allocated to call the snoptA routine
    delete []iAfun;
    delete []jAvar;
    delete []A;
    delete []iGfun;
    delete []jGvar;

    delete []x;
    delete []xlow;
    delete []xupp;
    delete []xmul;
    delete []xstate;

    delete []F;
    delete []Flow;
    delete []Fupp;
    delete []Fmul;
    delete []Fstate;

    delete []xnames;
    delete []Fnames;

}
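The snopt wrapper above allocates every SNOPT buffer with new[] and frees it with delete[], so an exception thrown anywhere between allocation and cleanup would leak. One possible alternative, sketched below under the assumption of f2c-style integer/doublereal typedefs, is to let std::vector own the buffers and pass their .data() pointers to the same setX/setF calls; this is an illustration, not the actual PaGMO code.

// RAII sketch: std::vector owns the SNOPT buffers, .data() exposes raw pointers.
#include <vector>

typedef long integer;        // assumed f2c-style typedefs
typedef double doublereal;

int main()
{
	const integer n = 4, neF = 3;

	std::vector<doublereal> x(n, 0.), xlow(n, -1.), xupp(n, 1.), xmul(n, 0.);
	std::vector<integer>    xstate(n, 0);
	std::vector<doublereal> F(neF, 0.), Flow(neF, 0.), Fupp(neF, 0.), Fmul(neF, 0.);
	std::vector<integer>    Fstate(neF, 0);

	// The raw pointers would then be handed over exactly as before, e.g.
	//   SnoptProblem.setX(x.data(), xlow.data(), xupp.data(), xmul.data(), xstate.data());
	//   SnoptProblem.setF(F.data(), Flow.data(), Fupp.data(), Fmul.data(), Fstate.data());
	// and every buffer is released automatically when it goes out of scope.
	return 0;
}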
Example #18
0
std::vector<population::individual_type> hv_greedy_s_policy::select(population &pop) const
{
	// Fall back to best_s_policy when facing a single-objective problem.
	if (pop.problem().get_f_dimension() == 1) {
		return best_s_policy(m_rate, m_type).select(pop);
	}

	pagmo_assert(get_n_individuals(pop) <= pop.size());
	// Gets the number of individuals to select
	const population::size_type migration_rate = get_n_individuals(pop);
	// Create a temporary array of individuals.
	std::vector<population::individual_type> result;

	// Indices of fronts.
	std::vector< std::vector< population::size_type> > fronts_i = pop.compute_pareto_fronts();

	// Fitness vectors of individuals according to the indices above.
	std::vector< std::vector< fitness_vector> > fronts_f (fronts_i.size());

	// The nadir point is established manually later; the first individual's fitness serves as an initial "safe" candidate.
	fitness_vector refpoint(pop.get_individual(0).cur_f);

	for (unsigned int f_idx = 0 ; f_idx < fronts_i.size() ; ++f_idx) {
		fronts_f[f_idx].resize(fronts_i[f_idx].size());
		for (unsigned int p_idx = 0 ; p_idx < fronts_i[f_idx].size() ; ++p_idx) {
			fronts_f[f_idx][p_idx] = fitness_vector(pop.get_individual(fronts_i[f_idx][p_idx]).cur_f);

			// Update the nadir point manually for efficiency.
			for (unsigned int d_idx = 0 ; d_idx < fronts_f[f_idx][p_idx].size() ; ++d_idx) {
				refpoint[d_idx] = std::max(refpoint[d_idx], fronts_f[f_idx][p_idx][d_idx]);
			}
		}
	}

	// Epsilon is added to nadir point
	for (unsigned int d_idx = 0 ; d_idx < refpoint.size() ; ++d_idx) {
		refpoint[d_idx] += m_nadir_eps;
	}

	// Store which front we process (start with front 0) and the number of processed individuals.
	unsigned int front_idx = 0;
	unsigned int processed_individuals = 0;

	// Vector for maintaining the original indices of points
	std::vector<unsigned int> orig_indices;

	while (processed_individuals < migration_rate) {
		// If every point from the given front is needed anyway, just push back its individuals right away
		if (fronts_f[front_idx].size() <= (migration_rate - processed_individuals)) {
			for(unsigned int i = 0 ; i < fronts_i[front_idx].size() ; ++i) {
				result.push_back(pop.get_individual(fronts_i[front_idx][i]));
			}

			processed_individuals += fronts_f[front_idx].size();
			++front_idx;
		} else {
			// Prepare the vector for the original indices
			if (orig_indices.size() == 0) {
				orig_indices.resize(fronts_i[front_idx].size());
				iota(orig_indices.begin(), orig_indices.end(), 0);
			}

			// Compute the greatest contributor
			hypervolume hv(fronts_f[front_idx], false);
			hv.set_copy_points(false);
			unsigned int gc_idx = hv.greatest_contributor(refpoint);
			result.push_back(pop.get_individual(fronts_i[front_idx][orig_indices[gc_idx]]));
			
			// Remove it from the front along with its index
			orig_indices.erase(orig_indices.begin() + gc_idx);
			fronts_f[front_idx].erase(fronts_f[front_idx].begin() + gc_idx);
			++processed_individuals;
		}
	}

	return result;
}
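The key operation in the policy above is hv.greatest_contributor(refpoint): among the points of the current front, pick the one whose removal would cost the most hypervolume with respect to the reference point. For a sorted non-dominated 2-D front this exclusive contribution has a simple closed form, shown in the standalone sketch below (plain C++11 with made-up points; PaGMO's hypervolume class handles the general n-dimensional case).

// Brute-force 2-D illustration of the greatest hypervolume contributor (minimisation).
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
	// a non-dominated front and a reference point slightly beyond its nadir
	std::vector<std::pair<double, double>> front = {{1., 4.}, {2., 2.}, {4., 1.}};
	const double ref1 = 5., ref2 = 5.;

	std::sort(front.begin(), front.end());   // ascending in f1, hence descending in f2

	std::size_t best = 0;
	double best_contrib = -1.;
	for (std::size_t i = 0; i < front.size(); ++i) {
		const double width   = (i + 1 < front.size() ? front[i + 1].first  : ref1) - front[i].first;
		const double height  = (i > 0                ? front[i - 1].second : ref2) - front[i].second;
		const double contrib = width * height;   // exclusive hypervolume of point i
		if (contrib > best_contrib) { best_contrib = contrib; best = i; }
	}
	std::cout << "greatest contributor: (" << front[best].first << ", "
	          << front[best].second << "), exclusive volume " << best_contrib << std::endl;
	return 0;
}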