void ms::evolve(population &pop) const
{
	// Let's store some useful variables.
	const population::size_type NP = pop.size();

	// Get out if there is nothing to do.
	if (m_starts == 0 || NP == 0) {
		return;
	}

	// Local population used in the algorithm iterations.
	population working_pop(pop);

	// ms main loop
	for (int i = 0; i < m_starts; ++i) {
		working_pop.reinit();
		m_algorithm->evolve(working_pop);
		if (working_pop.problem().compare_fc(working_pop.get_individual(working_pop.get_best_idx()).cur_f,
			working_pop.get_individual(working_pop.get_best_idx()).cur_c,
			pop.get_individual(pop.get_worst_idx()).cur_f,
			pop.get_individual(pop.get_worst_idx()).cur_c))
		{
			// Update the best population, replacing its worst individual with the good one just produced.
			pop.set_x(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_x);
			pop.set_v(pop.get_worst_idx(),working_pop.get_individual(working_pop.get_best_idx()).cur_v);
		}
		if (m_screen_output) {
			std::cout << i << ". " << "\tCurrent iteration best: "
				<< working_pop.get_individual(working_pop.get_best_idx()).cur_f
				<< "\tOverall champion: " << pop.champion().f << std::endl;
		}
	}
}
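/*
 * Illustrative usage sketch for the multistart meta-algorithm above (not part of the
 * library source). It assumes the legacy PaGMO 1.x API: the umbrella header name, the
 * problem::ackley dimension argument and the constructor signatures of algorithm::de
 * and algorithm::ms are assumptions to be checked against the installed headers.
 */
#include <iostream>
#include "pagmo.h"	// assumed umbrella header; adjust the path to your installation

void ms_usage_sketch()
{
	pagmo::problem::ackley prob(10);	// 10-dimensional box-constrained test problem (assumed ctor)
	pagmo::algorithm::de local(100);	// inner algorithm restarted by ms (assumed ctor)
	pagmo::algorithm::ms algo(local, 20);	// 20 independent restarts (assumed ctor)
	pagmo::population pop(prob, 5);		// working population; ms only keeps the best points found
	algo.evolve(pop);			// each restart reinitialises a working copy of pop
	std::cout << "Champion fitness: " << pop.champion().f << std::endl;
}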
void sa_corana::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(),
		prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	const problem::base::size_type Dc = D - prob_i_dimension;

	// We perform some checks to determine whether the problem/population are suitable for sa_corana.
	if (Dc == 0) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for sa_corana to optimise");
	}
	if (prob_c_dimension != 0) {
		pagmo_throw(value_error,"The problem is not box constrained and sa_corana is not suitable to solve it");
	}
	if (prob_f_dimension != 1) {
		pagmo_throw(value_error,"The problem is not single objective and sa_corana is not suitable to solve it");
	}

	// Determine the number of temperature adjustments for the annealing procedure.
	const size_t n_T = m_niter / (m_step_adj * m_bin_size * Dc);

	// Get out if there is nothing to do.
	if (NP == 0 || m_niter == 0) {
		return;
	}
	if (n_T == 0) {
		pagmo_throw(value_error,"n_T is zero, increase niter");
	}

	// The starting point is the best individual.
	const int bestidx = pop.get_best_idx();
	const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
	const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;

	// Determine the coefficient used to decrease the temperature.
	const double Tcoeff = std::pow(m_Tf/m_Ts,1.0/(double)(n_T));

	// Store the current and new points.
	decision_vector xNEW = x0, xOLD = xNEW;
	fitness_vector fNEW = fit0, fOLD = fNEW;
	// Store the adaptive step of each component (integer part included but not used).
	decision_vector step(D,m_range);
	// Store the number of accepted points per component (integer part included but not used).
	std::vector<int> acp(D,0);
	double ratio = 0, currentT = m_Ts, probab = 0;

	// Main SA loops.
	for (size_t jter = 0; jter < n_T; ++jter) {
		for (int mter = 0; mter < m_step_adj; ++mter) {
			for (int kter = 0; kter < m_bin_size; ++kter) {
				size_t nter = boost::uniform_int<int>(0,Dc-1)(m_urng);
				for (size_t numb = 0; numb < Dc; ++numb) {
					nter = (nter + 1) % Dc;
					// We modify the current point by mutating its nter-th component within
					// a step that we will later adapt.
					xNEW[nter] = xOLD[nter] + boost::uniform_real<double>(-1,1)(m_drng) * step[nter] * (ub[nter]-lb[nter]);

					// If the new solution produced is infeasible, ignore it.
					if ((xNEW[nter] > ub[nter]) || (xNEW[nter] < lb[nter])) {
						xNEW[nter] = xOLD[nter];
						continue;
					}

					// We evaluate the objective function at the new point.
					prob.objfun(fNEW,xNEW);

					// We decide whether to accept or discard the point.
					if (prob.compare_fitness(fNEW,fOLD)) {
						// Accept.
						xOLD[nter] = xNEW[nter];
						fOLD = fNEW;
						acp[nter]++;	// Increase the number of accepted values.
					} else {
						// Test it with the Boltzmann criterion to decide the acceptance.
						probab = exp(-fabs(fOLD[0] - fNEW[0]) / currentT);
						// We compare probab with a random probability.
						if (probab > m_drng()) {
							xOLD[nter] = xNEW[nter];
							fOLD = fNEW;
							acp[nter]++;	// Increase the number of accepted values.
						} else {
							xNEW[nter] = xOLD[nter];
						}
					} // end if
				} // end for(numb = 0; ...
			} // end for(kter = 0; ...

			// Adjust the step adaptively for each component.
			for (size_t iter = 0; iter < Dc; ++iter) {
				ratio = (double)acp[iter]/(double)m_bin_size;
				acp[iter] = 0;	// Reset the counter.
				if (ratio > .6) {
					// Too many acceptances: increase the step (by a factor of 3 at most).
					step[iter] = step[iter] * (1 + 2 * (ratio - .6)/.4);
				} else if (ratio < .4) {
					// Too few acceptances: decrease the step (by a factor of 3 at most).
					step[iter] = step[iter] / (1 + 2 * ((.4 - ratio)/.4));
				}
				// And if it becomes too large, reset it to its initial value.
				if (step[iter] > m_range) {
					step[iter] = m_range;
				}
			}
		}
		// Cooling schedule.
		currentT *= Tcoeff;
	}

	// If the annealed point improves on the starting individual, write it back into the population.
	if (prob.compare_fitness(fOLD,fit0)) {
		pop.set_x(bestidx,xOLD);	// A new evaluation is possible here......
		// Set the velocity to the element-wise difference with the individual's current chromosome.
		std::transform(xOLD.begin(), xOLD.end(), pop.get_individual(bestidx).cur_x.begin(), xOLD.begin(), std::minus<double>());
		pop.set_v(bestidx,xOLD);
	}
}
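/*
 * Standalone sketch of the acceptance rule used in sa_corana::evolve above: for a
 * minimisation problem, a worse candidate is kept with probability
 * exp(-|f_old - f_new| / T), so almost any move survives at high temperature while
 * only improvements survive as T -> 0. The names and the <random> generator are
 * illustrative only; the library code uses prob.compare_fitness and its own m_drng.
 */
#include <cmath>
#include <random>

bool accept_candidate(double f_old, double f_new, double temperature, std::mt19937 &rng)
{
	if (f_new <= f_old) {
		return true;	// improvements are always accepted
	}
	std::uniform_real_distribution<double> unif(0.0, 1.0);
	const double probab = std::exp(-std::fabs(f_old - f_new) / temperature);
	return probab > unif(rng);	// occasionally accept a worse point
}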
void snopt::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(),
		prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	const problem::base::size_type Dc = D - prob_i_dimension;
	const std::vector<double>::size_type D_ineqc = prob.get_ic_dimension();
	const std::vector<double>::size_type D_eqc = prob_c_dimension - D_ineqc;
	const std::string name = prob.get_name();

	// We perform some checks to determine whether the problem/population are suitable for SNOPT.
	if (prob_i_dimension != 0) {
		pagmo_throw(value_error,"No integer part allowed yet....");
	}
	if (Dc == 0) {
		pagmo_throw(value_error,"No continuous part....");
	}
	if (prob_f_dimension != 1) {
		pagmo_throw(value_error,"The problem is not single objective and SNOPT is not suitable to solve it");
	}

	// Get out if there is nothing to do.
	if (NP == 0 || m_major == 0) {
		return;
	}

	// We allocate memory for the decision vector that will be used in snopt_function_.
	di_comodo.x.resize(Dc);
	di_comodo.c.resize(prob_c_dimension);
	di_comodo.f.resize(prob_f_dimension);

	// We construct a snoptProblem_PAGMO passing the pointers to the problem and to the
	// allocated memory area for the di_comodo vector.
	snoptProblem_PAGMO SnoptProblem(prob, &di_comodo);

	// Allocate and initialize.
	integer n = Dc;
	// Box-constrained non-linear optimization.
	integer neF = 1 + prob_c_dimension;

	// Memory sizing of A.
	integer lenA = Dc * (1 + prob_c_dimension);	// Overestimate.
	integer *iAfun = new integer[lenA];
	integer *jAvar = new integer[lenA];
	doublereal *A = new doublereal[lenA];

	// Memory sizing of G.
	int lenG = Dc * (1 + prob_c_dimension);		// Overestimate.
	integer *iGfun = new integer[lenG];
	integer *jGvar = new integer[lenG];

	// Decision vector memory allocation.
	doublereal *x = new doublereal[n];
	doublereal *xlow = new doublereal[n];
	doublereal *xupp = new doublereal[n];
	doublereal *xmul = new doublereal[n];
	integer *xstate = new integer[n];

	// Objective function memory allocation.
	doublereal *F = new doublereal[neF];
	doublereal *Flow = new doublereal[neF];
	doublereal *Fupp = new doublereal[neF];
	doublereal *Fmul = new doublereal[neF];
	integer *Fstate = new integer[neF];

	integer nxnames = 1;
	integer nFnames = 1;
	char *xnames = new char[nxnames*8];
	char *Fnames = new char[nFnames*8];

	integer ObjRow = 0;
	doublereal ObjAdd = 0;

	// Set the upper and lower bounds, and the initial guess.
	int bestidx = pop.get_best_idx();
	for (pagmo::problem::base::size_type i = 0; i < Dc; i++) {
		xlow[i] = lb[i];
		xupp[i] = ub[i];
		xstate[i] = 0;
		x[i] = pop.get_individual(bestidx).cur_x[i];
	}

	// Set the bounds for the objective, equality and inequality constraints.
	// 1 - Objective function.
	Flow[0] = -std::numeric_limits<double>::max();
	Fupp[0] = std::numeric_limits<double>::max();
	F[0] = pop.get_individual(bestidx).cur_f[0];
	// 2 - Equality constraints.
	for (pagmo::problem::base::size_type i = 0; i < D_eqc; ++i) {
		Flow[i+1] = 0;
		Fupp[i+1] = 0;
	}
	// 3 - Inequality constraints.
	for (pagmo::problem::base::size_type i = 0; i < D_ineqc; ++i) {
		Flow[i+1+D_eqc] = -std::numeric_limits<double>::max();
		Fupp[i+1+D_eqc] = 0;
	}

	// Load the data for SnoptProblem ...
	SnoptProblem.setProblemSize(n, neF);
	SnoptProblem.setNeG(lenG);
	SnoptProblem.setNeA(lenA);
	SnoptProblem.setA(lenA, iAfun, jAvar, A);
	SnoptProblem.setG(lenG, iGfun, jGvar);
	SnoptProblem.setObjective(ObjRow, ObjAdd);
	SnoptProblem.setX(x, xlow, xupp, xmul, xstate);
	SnoptProblem.setF(F, Flow, Fupp, Fmul, Fstate);
	SnoptProblem.setXNames(xnames, nxnames);
	SnoptProblem.setFNames(Fnames, nFnames);
	SnoptProblem.setProbName(name.c_str());	// This is limited to 8 characters!!!
	SnoptProblem.setUserFun(snopt_function_);

	// We set some parameters.
	if (m_screen_output) SnoptProblem.setIntParameter("Summary file",6);
	if (m_file_out) SnoptProblem.setPrintFile(name.c_str());
	SnoptProblem.setIntParameter("Derivative option", 0);
	SnoptProblem.setIntParameter("Major iterations limit", m_major);
	SnoptProblem.setIntParameter("Iterations limit",100000);
	SnoptProblem.setRealParameter("Major feasibility tolerance", m_feas);
	SnoptProblem.setRealParameter("Major optimality tolerance", m_opt);

	// We set the sparsity structure.
	int neG;
	try {
		// The user did implement the sparsity in the problem.
		std::vector<int> iGfun_vect, jGvar_vect;
		prob.set_sparsity(neG,iGfun_vect,jGvar_vect);
		for (int i = 0; i < neG; i++) {
			iGfun[i] = iGfun_vect[i];
			jGvar[i] = jGvar_vect[i];
		}
		SnoptProblem.setNeG(neG);
		SnoptProblem.setNeA(0);
		SnoptProblem.setG(lenG, iGfun, jGvar);
	} catch (const not_implemented_error &) {
		// The user did not implement the sparsity in the problem.
		SnoptProblem.computeJac();
		neG = SnoptProblem.getNeG();
	}

	if (m_screen_output) {
		std::cout << "PaGMO 4 SNOPT:" << std::endl << std::endl;
		std::cout << "Sparsity pattern set, NeG = " << neG << std::endl;
		std::cout << "iGfun: [";
		for (int i = 0; i < neG-1; ++i) std::cout << iGfun[i] << ",";
		std::cout << iGfun[neG-1] << "]" << std::endl;
		std::cout << "jGvar: [";
		for (int i = 0; i < neG-1; ++i) std::cout << jGvar[i] << ",";
		std::cout << jGvar[neG-1] << "]" << std::endl;
	}

	integer Cold = 0;

	// HERE WE CALL the snoptA routine!!!!!
	SnoptProblem.solve(Cold);

	// Save the final point, making sure it is within the linear bounds.
	std::copy(x,x+n,di_comodo.x.begin());
	decision_vector newx = di_comodo.x;
	std::transform(di_comodo.x.begin(), di_comodo.x.end(), pop.get_individual(bestidx).cur_x.begin(),
		di_comodo.x.begin(), std::minus<double>());
	for (integer i = 0; i < n; i++) {
		newx[i] = std::min(std::max(lb[i],newx[i]),ub[i]);
	}
	pop.set_x(bestidx,newx);
	pop.set_v(bestidx,di_comodo.x);

	// Clean up the memory allocated to call the snoptA routine.
	delete []iAfun;
	delete []jAvar;
	delete []A;
	delete []iGfun;
	delete []jGvar;
	delete []x;
	delete []xlow;
	delete []xupp;
	delete []xmul;
	delete []xstate;
	delete []F;
	delete []Flow;
	delete []Fupp;
	delete []Fmul;
	delete []Fstate;
	delete []xnames;
	delete []Fnames;
}
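/*
 * Sketch of the displacement-as-velocity idiom that snopt::evolve and cs::evolve use
 * when writing the result back into the population: the velocity stored with set_v is
 * the element-wise difference between the improved point and the chromosome the local
 * search started from. Plain std::vector<double> is used here instead of decision_vector;
 * the function name is illustrative only.
 */
#include <algorithm>
#include <functional>
#include <vector>

std::vector<double> displacement(const std::vector<double> &x_new, const std::vector<double> &x_old)
{
	std::vector<double> v(x_new.size());
	// v[i] = x_new[i] - x_old[i], the same pattern as the std::transform calls above.
	std::transform(x_new.begin(), x_new.end(), x_old.begin(), v.begin(), std::minus<double>());
	return v;
}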
void cs::evolve(population &pop) const
{
	// Let's store some useful variables.
	const problem::base &prob = pop.problem();
	const problem::base::size_type D = prob.get_dimension(), prob_i_dimension = prob.get_i_dimension(),
		prob_c_dimension = prob.get_c_dimension(), prob_f_dimension = prob.get_f_dimension();
	const decision_vector &lb = prob.get_lb(), &ub = prob.get_ub();
	const population::size_type NP = pop.size();
	const problem::base::size_type Dc = D - prob_i_dimension;

	// We perform some checks to determine whether the problem/population are suitable for compass search.
	if (Dc == 0) {
		pagmo_throw(value_error,"There is no continuous part in the problem decision vector for compass search to optimise");
	}
	if (prob_c_dimension != 0) {
		pagmo_throw(value_error,"The problem is not box constrained and compass search is not suitable to solve it");
	}
	if (prob_f_dimension != 1) {
		pagmo_throw(value_error,"The problem is not single objective and compass search is not suitable to solve it");
	}

	// Get out if there is nothing to do.
	if (NP == 0 || m_max_eval == 0) {
		return;
	}

	// The starting point is the best individual.
	const int bestidx = pop.get_best_idx();
	const decision_vector &x0 = pop.get_individual(bestidx).cur_x;
	const fitness_vector &fit0 = pop.get_individual(bestidx).cur_f;

	decision_vector x = x0, newx;
	fitness_vector f = fit0, newf = fit0;
	bool flag = false;
	int eval = 0;

	double newrange = m_start_range;

	while (newrange > m_stop_range && eval <= m_max_eval) {
		flag = false;
		for (unsigned int i = 0; i < Dc; i++) {
			newx = x;

			// Move up.
			newx[i] = x[i] + newrange * (ub[i]-lb[i]);
			// Feasibility correction.
			if (newx[i] > ub[i]) newx[i] = ub[i];

			prob.objfun(newf,newx);
			eval++;
			if (prob.compare_fitness(newf,f)) {
				// Accept.
				f = newf;
				x = newx;
				flag = true;
				break;
			}

			// Move down.
			newx[i] = x[i] - newrange * (ub[i]-lb[i]);
			// Feasibility correction.
			if (newx[i] < lb[i]) newx[i] = lb[i];

			prob.objfun(newf,newx);
			eval++;
			if (prob.compare_fitness(newf,f)) {
				// Accept.
				f = newf;
				x = newx;
				flag = true;
				break;
			}
		}
		if (!flag) {
			newrange *= m_reduction_coeff;
		}
	} // end while

	// newx now stores the velocity (displacement from the starting chromosome).
	std::transform(x.begin(), x.end(), pop.get_individual(bestidx).cur_x.begin(), newx.begin(), std::minus<double>());
	pop.set_x(bestidx,x);	// A new evaluation is possible here......
	pop.set_v(bestidx,newx);
}
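/*
 * Distilled version of the compass-search loop in cs::evolve above, run on a toy
 * quadratic instead of a pagmo::problem. It probes each coordinate up and down by
 * range*(ub-lb), clips to the box, accepts the first improvement, and shrinks the range
 * by reduction_coeff whenever a full sweep fails. Purely illustrative: names are made up
 * and prob.compare_fitness is replaced by a plain minimisation comparison.
 */
#include <algorithm>
#include <cstddef>
#include <vector>

double toy_objective(const std::vector<double> &x)
{
	double s = 0.0;
	for (std::size_t i = 0; i < x.size(); ++i) {
		s += (x[i] - 1.0) * (x[i] - 1.0);	// minimum at x = (1,...,1)
	}
	return s;
}

std::vector<double> compass_search_sketch(std::vector<double> x, double lb, double ub,
	double range, double stop_range, double reduction_coeff, int max_eval)
{
	double f = toy_objective(x);
	int eval = 1;
	while (range > stop_range && eval <= max_eval) {
		bool improved = false;
		for (std::size_t i = 0; i < x.size() && !improved; ++i) {
			const double signs[2] = {1.0, -1.0};
			for (int s = 0; s < 2 && !improved; ++s) {
				std::vector<double> cand = x;
				// Probe one coordinate, clipped to the box bounds, as cs::evolve does.
				cand[i] = std::min(std::max(x[i] + signs[s] * range * (ub - lb), lb), ub);
				const double fc = toy_objective(cand);
				++eval;
				if (fc < f) {
					// Accept the first improving move and restart the sweep.
					f = fc;
					x = cand;
					improved = true;
				}
			}
		}
		if (!improved) {
			range *= reduction_coeff;	// No direction helped: shrink the pattern.
		}
	}
	return x;
}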