Example No. 1
0
void Sample2_Shared()
{
  // (A) create a new CSample instance with one reference
  boost::shared_ptr<Sample> mySample(new Sample);
  std::cout << "The Sample now has references:" << mySample.use_count() << "\n"; // should be 1

  // (B) assign a second pointer to it:
  boost::shared_ptr<Sample> mySample2 = mySample; // should be 2 refs by now
  std::cout << "The Sample now has references:" << mySample.use_count() << "\n";

  // (C) set the first pointer to NULL
  mySample.reset();
  std::cout << "The Sample now has references: " << mySample2.use_count() << "\n";  // 1

  // the object allocated in (1) is deleted automatically
  // when mySample2 goes out of scope
}
Example No. 2
0
// Runs one CMA-ES update step on the given swarm: learns the search
// distribution (myLearn), applies divergence/restart safeguards, then
// samples a new population (mySample), re-learning on sampling failure.
//
// @param sw  swarm carrying the CMA-ES state read/written here: mean,
//            covariance C, eigen-decomposition B/D, step size sigma,
//            the repository archive, particles, and the init flag.
//
// Side effects: mutates sw in place; prints diagnostics to stdout and
// warnings to stderr.
//
// NOTE(review): relies on names declared elsewhere in this file —
// decisionNumber, logCMAES, MAXDOUBLE, myLearn, mySample, printVector,
// rgdouMax, rgdouMin — confirm their contracts against the rest of the file.
void cma_es(Swarm &sw){
	
	// Debug dump of the repository (solution archive) before learning.
	if(logCMAES){
		printf("\n--------------------------------------------------------------\n");
		printf("Repository (%d)\n", sw.repository.getActualSize());
		for(int i=0;i<sw.repository.getActualSize();i++){
			printVector(sw.repository.getSolution(i).decisionVector, decisionNumber);
			printf("\n");
		}
	}
	
	// sigmaPrev is only used by the disabled sigma-jump check below;
	// currently it is written but never read.
	double sigmaPrev=sw.sigma;
	myLearn(sw);
	
	// 		if(fabs(sigmaPrev-sw.sigma) > 10000){
	// 			fprintf(stderr, "\nWARNING!, sigma changed too much: %f -> %f. resetting...\n",sigmaPrev, sw.sigma); //shows warning message
	// 			sw.init=true; //set the init flag, so all the CMA-ES variables are reset
	// 			myLearn(sw); //learn again using the default values as base
	// 		}
	
	// sw.sigma != sw.sigma is the classic NaN self-comparison test
	// (NaN compares unequal to itself); the second clause catches overflow.
	if(sw.sigma != sw.sigma || sw.sigma >= MAXDOUBLE){ //check for invalid numbers NaN or inf
		fprintf(stderr, "WARNING!, sigma: %f. resetting...\n", sw.sigma); //shows warning message
		// 		exit(1);
		sw.init=true; //set the init flag, so all the CMA-ES variables are reset
		myLearn(sw); //learn again using the default values as base
	}
	
	//four reset criteria as in the paper "injecting cma-es into moea/d"
	//NoEffectCoord: adding 0.2*stddev to a mean coordinate changes nothing,
	//i.e. the step size has fallen below floating-point resolution there.
	for (int iKoo = 0; iKoo < decisionNumber; ++iKoo){
		if (sw.mean[iKoo] == sw.mean[iKoo] + 0.2*sw.sigma*sqrt(sw.C[iKoo][iKoo])){
			fprintf(stderr, "NoEffectCoordinate: standard deviation 0.2*%7.2e in coordinate %d without effect\n", sw.sigma*sqrt(sw.C[iKoo][iKoo]), iKoo); //shows warning message
			sw.init=true; //set the init flag, so all the CMA-ES variables are reset
			myLearn(sw); //learn again using the default values as base
			break;
		}
	}
	//NoEffectAxis: a 0.1*sigma step along a principal axis (column iAchse
	//of B, scaled by eigenvalue D) moves no coordinate of the mean.
	int iKoo;
	for (int iAchse = 0; iAchse < decisionNumber; ++iAchse){
		double fac = 0.1 * sw.sigma * sw.D[iAchse];
		for (iKoo = 0; iKoo < decisionNumber; ++iKoo){ 
			if (sw.mean[iKoo] != sw.mean[iKoo] + fac * sw.B[iKoo][iAchse])
				break;
		}
		// Inner loop ran to completion => no coordinate was affected.
		if (iKoo == decisionNumber){
			/* t->sigma *= exp(0.2+t->sp.cs/t->sp.damps); */
			fprintf(stderr, "NoEffectAxis: standard deviation 0.1*%7.2e in principal axis %d without effect\n", fac/0.1, iAchse);
			sw.init=true; //set the init flag, so all the CMA-ES variables are reset
			myLearn(sw); //learn again using the default values as base
			break;
		}
	}
	//TolXUp: some axis' standard deviation (sigma*sqrt(C[i][i])) exceeded
	//1e3 times the assumed initial standard deviation — likely divergence.
	double stopTolUpXFactor=1e3;
	double initialStds=0.3;
	int i;
	for(i=0; i<decisionNumber; ++i) {
		if (sw.sigma * sqrt(sw.C[i][i]) > stopTolUpXFactor * initialStds)
			break;
	}
	// Early break above => i < decisionNumber => the criterion fired.
	if (i < decisionNumber) {
		fprintf(stderr, "TolUpX: standard deviation increased by more than %7.2e, larger initial standard deviation recommended \n", stopTolUpXFactor);
		sw.init=true; //set the init flag, so all the CMA-ES variables are reset
		myLearn(sw); //learn again using the default values as base
	}
	//ConditionCov: covariance condition number (ratio of extreme eigenvalues
	//in D) reached 1e13 — the matrix is numerically near-singular.
	double dMaxSignifKond=1e13;
	if (rgdouMax(sw.D, decisionNumber) >= rgdouMin(sw.D, decisionNumber) * dMaxSignifKond) {
		fprintf(stderr, "ConditionNumber: maximal condition number %7.2e reached. maxEW=%7.2e,minEW=%7.2e\n", dMaxSignifKond, rgdouMax(sw.D, decisionNumber), rgdouMin(sw.D, decisionNumber));
		sw.init=true; //set the init flag, so all the CMA-ES variables are reset
		myLearn(sw); //learn again using the default values as base
	}
	
	
	//************************************* RESAMPLE SOLUTIONS FROM THE GOOD FRONTS ******************************//
	//re-learn and re-sample when errors in the covariance matrix are detected in the sampling phase
	// NOTE(review): this loop assumes myLearn eventually produces a sampleable
	// model; there is no iteration cap, so a persistent failure would spin forever.
	bool success=mySample(sw);
	while(!success){
		myLearn(sw);
		success=mySample(sw);
		fprintf(stderr,"Resample\n");
	}
	
	
// 	Neighbor orderedSwarms[swarmNumber];
// 	for(int i=0;i<swarmNumber;i++){
// 		orderedSwarms[i].index=i;
// 		orderedSwarms[i].distance=swarms[i].modelQuality*-1;//inverted model quality because we will use the preexisting sort, that sorts based on the smallest value (distance)
// 	}
// 	std::sort(orderedSwarms, orderedSwarms+swarmNumber, neighborsComparator);
// 	
// // 	for(int i=0;i<swarmNumber;i++){
// // 		printf("swarm: %d has quality %f sz: %d\n", orderedSwarms[i].index, orderedSwarms[i].distance*-1, repGlobal->getActualSize());
// // 	}
// // 	printf("\n\n\n");
// 
// 	bool good=false;
// 	for(int i=0;i<100;i++){
// 		if(sw.neighborhood[0].index == orderedSwarms[i].index){
// 			good=true;
// 			break;
// 		}
// 	}
// 	
// 	if(good){
// 		//re-learn and re-sample when errors in the covariance matrix are detected in the sampling phase
// 		bool success=mySampleReplacement(sw, 10);
// 		while(!success){
// 			myLearn(sw);
// 			success=mySampleReplacement(sw, 10);
// 			fprintf(stderr,"Resample\n");
// 		}
// 	}else{
// 		for(int i=0;i<sw.getSize();i++)
// 			sw.particles[i].solution.evalSolution=false;
// // 		//re-learn and re-sample when errors in the covariance matrix are detected in the sampling phase
// // 		bool success=mySample(sw);
// // 		while(!success){
// // 			myLearn(sw);
// // 			success=mySample(sw);
// // 			fprintf(stderr,"Resample\n");
// // 		}
// 	}
	
		//************************************* END OF RESAMPLING SOLUTIONS FROM THE GOOD FRONTS ******************************//
	
	// Debug dump of the freshly sampled population.
	if(logCMAES){
		printf("\n\npop after \n\n");
		for(int i=0; i<sw.getSize();i++){
			//printVector(evo.rgrgx[i],N+2);
			printVector(sw.particles[i].solution.decisionVector,decisionNumber);
			printf("\n");
		}
	}
}