Example #1
void KPCA::computeEigens(bool centering) {
    eigens.clear();
    std::vector<double> vectors(dim*dim), lambdas(dim);

    // Work on a copy of the Gram matrix; the decomposition below overwrites its input.
    memcpy(&vectors[0], gram, sizeof(double)*dim*dim);
    if (centering)
    {
        std::cout << "kpca => centering" << std::endl;
        matrix_sym_centering_double (&vectors[0],dim,NULL);
    }

    // matrix_eigSym_double appears to fill `lambdas` with the eigenvalues and to
    // overwrite `vectors` with the corresponding eigenvectors, one per row.
    matrix_eigSym_double(&vectors[0], &lambdas[0], dim);
    
    // Find the largest eigenvalue magnitude; if the whole spectrum is numerically zero, keep nothing.
    double max = 0;
    for (size_t r = 0; r < dim; r++) {
        if (fabs(lambdas[r]) > max)
            max = fabs(lambdas[r]);
    }
    if (max < 1E-7)
        return;
    // Keep only eigenpairs whose magnitude is significant relative to the largest one.
    for (size_t r = 0; r < dim; r++) {
        if (fabs(lambdas[r]) > 1E-6*max) {
            eigens.push_back(Eigen(lambdas[r], &vectors[r*dim], dim));
        }
    }
}
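
A minimal standalone sketch of the same relative-threshold filtering, assuming only a plain vector of eigenvalues (the helper name significantEigens is hypothetical, not part of KPCA):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Return the indices of eigenvalues whose magnitude is at least relTol times
// the largest magnitude; returns nothing if the whole spectrum is numerically zero.
std::vector<size_t> significantEigens(const std::vector<double>& lambdas,
                                      double absTol = 1e-7, double relTol = 1e-6) {
    double maxMag = 0;
    for (double l : lambdas)
        maxMag = std::max(maxMag, std::fabs(l));
    std::vector<size_t> kept;
    if (maxMag < absTol)
        return kept; // numerically zero spectrum
    for (size_t r = 0; r < lambdas.size(); r++)
        if (std::fabs(lambdas[r]) > relTol * maxMag)
            kept.push_back(r);
    return kept;
}

int main() {
    std::vector<double> lambdas = {4.2, -3.0e-9, 0.5, 1.1e-8};
    for (size_t r : significantEigens(lambdas))
        printf("kept eigenvalue %zu: %g\n", r, lambdas[r]);
    return 0; // expected to keep indices 0 and 2
}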
Example #2
bool mySampleReplacement (Swarm &sw, int trials){
	int N=decisionNumber;
	
	//-------------------------------------------------------------------------------------------
	
	/* calculate eigensystem: Eigen() factors the covariance matrix C into eigenvalues D and eigenvectors B, with rgdTmp as scratch space */
	double rgdTmp[N];
	Eigen( N, sw.C, sw.D, sw.B, rgdTmp);
	//The determinant of a symmetric matrix equals the product of its eigenvalues, so it can be read directly off the eigendecomposition
	sw.det=sw.D[0];
	for(int i=1;i<N;i++)
		sw.det*=sw.D[i];
	
	double lg=log(sw.det);
	if(lg >= MAXDOUBLE || lg != lg){ //lg != lg tests for NaN, i.e. the determinant was not a valid positive number
		fprintf(stderr, "WARNING! Log of the covariance matrix determinant: %f. ", lg);
		sw.init=true; //set the init flag, so all the CMA-ES variables are reset
		return false;
	}
	
	for (int i = 0; i < N; ++i)
		sw.D[i] = sqrt(sw.D[i]); //D now holds the standard deviations along the principal axes
	
	for(int i=0;i<N;i++){
		//tests for 1) NaN (a negative eigenvalue made sqrt fail, i.e. the covariance matrix was not positive semi-definite), 2) infinity, 3) a negative value
		if(sw.D[i] != sw.D[i] || sw.D[i] >= MAXDOUBLE || sw.D[i] < 0){
			fprintf(stderr, "WARNING! Value: %f in eigenvalues vector. ", sw.D[i]);
			sw.init=true; //set the init flag, so all the CMA-ES variables are reset
			return false;
		}
	}
	
	//-------------------------------------------------------------------------------------------

	double previousSol[N];
	if(sw.getSize() == 1)
		memcpy(previousSol, sw.particles[0].solution.decisionVector, sizeof(double)*N);
	
	for (int iNk = 0; iNk < sw.getSize(); ++iNk){//using the likelihood as surrogate
		double best[N], bestValue=MAXDOUBLE*-1;//keeping the highest likelihood favors convergence (keeping the lowest would favor diversity)
		for(int t=0;t<trials;t++){//sample `trials` candidates per particle and keep the best
			double candidate[N];
			
			for (int i = 0; i < N; ++i)
				rgdTmp[i] = sw.D[i] * cmaes_random_Gauss();
			/* add mutation (sigma * B * (D*z)) */
			for (int i = 0; i < N; ++i) {
				double sum = 0.0;
				for (int j = 0; j < N; ++j)
					sum += sw.B[i][j] * rgdTmp[j];

				candidate[i]=sw.mean[i] + sw.sigma * sum;//normalized new solution
			}
			double opinion=neighborhoodOpinion(sw, candidate);
			if(opinion > bestValue){//highest likelihood favors convergence (use '<' for diversity)
				for (int i = 0; i < N; ++i)
					best[i]=candidate[i];
				bestValue=opinion;
			}
		}
		
		sw.particles[iNk].solution.evalSolution=true;
		for (int i = 0; i < N; ++i)
			sw.particles[iNk].solution.decisionVector[i] = (best[i]*(superiorPositionLimit[i]-inferiorPositionLimit[i]))+inferiorPositionLimit[i]; //denormalize the solution back to the original variable bounds
	}

	//if the sampled solutions are all equal (or, with a single particle, equal to the previous solution), inflate sigma to escape the flat region
	if(sw.getSize() == 1){
		if(isEqual(previousSol, sw.particles[0].solution.decisionVector, N)){
			sw.sigma *= exp(0.2+cSigma/dSigma);
			fprintf(stderr, "Warning: sigma increased due to equal decision vectors.\n");
			return false;
		}
	}else{
		if(isEqual(sw.particles[0].solution.decisionVector, sw.particles[sw.getSize()-1].solution.decisionVector, N)){
			sw.sigma *= exp(0.2+cSigma/dSigma);
			fprintf(stderr, "Warning: sigma increased due to equal decision vectors.\n");
			return false;
		}
	}
	return true;
}
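
The sampling loop above implements the standard CMA-ES mutation x = mean + sigma * B * (D .* z) with z drawn from a standard normal. A minimal self-contained sketch of that one step, using std::mt19937 in place of cmaes_random_Gauss() (all names here are illustrative, not the codebase's own):

#include <cstdio>
#include <random>
#include <vector>

// Draw one CMA-ES sample x = mean + sigma * B * (D .* z), where z ~ N(0, I),
// D holds the per-axis standard deviations and B the eigenvector basis of C.
std::vector<double> sampleCMAES(const std::vector<double>& mean,
                                const std::vector<std::vector<double>>& B,
                                const std::vector<double>& D,
                                double sigma, std::mt19937& rng) {
    const size_t n = mean.size();
    std::normal_distribution<double> gauss(0.0, 1.0);
    std::vector<double> z(n), x(n);
    for (size_t i = 0; i < n; i++)
        z[i] = D[i] * gauss(rng);                // scale: D * z
    for (size_t i = 0; i < n; i++) {
        double sum = 0.0;
        for (size_t j = 0; j < n; j++)
            sum += B[i][j] * z[j];               // rotate into problem coordinates
        x[i] = mean[i] + sigma * sum;            // shift by the mean, scale by the step size
    }
    return x;
}

int main() {
    std::mt19937 rng(42);
    std::vector<double> mean = {0.5, 0.5};
    std::vector<std::vector<double>> B = {{1, 0}, {0, 1}}; // identity eigenbasis
    std::vector<double> D = {1.0, 0.2};                    // axis standard deviations
    std::vector<double> x = sampleCMAES(mean, B, D, 0.3, rng);
    printf("sample: %f %f\n", x[0], x[1]);
    return 0;
}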
Example #3
bool mySample (Swarm &sw){
	int N=decisionNumber;
	/* calculate eigensystem */

	if(logCMAES){
		printf("\n----------------------before eigen calculation ---------------------- ");

		printf("\nB mat: \n");
		for (int i = 0; i < N; ++i){
			printVector(sw.B[i], N);
			printf("\n");
		}

		printf("\nD mat: \n");
		printVector(sw.D, N);
		printf("\n");

		printf("---------------------end before ---------------------- \n");

		printf("\nCov mat: \n");
		for (int i = 0; i < N; ++i){
			printVector(sw.C[i], N);
			printf("\n");
		}
	}
	
	double rgdTmp[N];
	Eigen( N, sw.C, sw.D, sw.B, rgdTmp); //factors the covariance matrix C into eigenvalues D and eigenvectors B, with rgdTmp as scratch space
	//The determinant of a symmetric matrix equals the product of its eigenvalues, so it can be read directly off the eigendecomposition
	sw.det=sw.D[0];
	for(int i=1;i<N;i++)
		sw.det*=sw.D[i];
	
	double lg=log(sw.det);
	if(lg >= MAXDOUBLE || lg != lg){ //lg != lg tests for NaN, i.e. the determinant was not a valid positive number
		fprintf(stderr, "WARNING! Log of the covariance matrix determinant: %f. ", lg);
		sw.init=true; //set the init flag, so all the CMA-ES variables are reset
		return false;
	}
	
	for (int i = 0; i < N; ++i)
		sw.D[i] = sqrt(sw.D[i]); //D now holds the standard deviations along the principal axes
	
	for(int i=0;i<N;i++){
		//tests for 1) NaN (a negative eigenvalue made sqrt fail, i.e. the covariance matrix was not positive semi-definite), 2) infinity, 3) a negative value
		if(sw.D[i] != sw.D[i] || sw.D[i] >= MAXDOUBLE || sw.D[i] < 0){
			fprintf(stderr, "WARNING! Value: %f in eigenvalues vector. ", sw.D[i]);
			sw.init=true; //set the init flag, so all the CMA-ES variables are reset
			return false;
		}
	}
	
	if(logCMAES){
		printf("---------------------after eigen calculation ---------------------- \n");

		printf("B mat: \n");
		for (int i = 0; i < N; ++i){
			printVector(sw.B[i], N);
			printf("\n");
		}

		printf("\nD mat: \n");
		printVector(sw.D, N);
		printf("\n");

		printf("---------------------end after ---------------------- \n");

		printf("\nsigma: %f (%e)\n",sw.sigma, sw.sigma);

		//print mean + sigma*B*(D*k) for k = 3, 0 and -3 to visualize the sampling range along each axis
		for (int i = 0; i < N; ++i)
			rgdTmp[i] = sw.D[i] * 3;
		printf("\nD (sample) (* 3) : ");
		for (int i = 0; i < N; ++i){
			double sum = 0.0;
			for (int j = 0; j < N; ++j)
				sum += sw.B[i][j] * rgdTmp[j];
			if((sw.mean[i] + sw.sigma * sum ) >= 0 )
				printf(" ");
			printf("%.3f ", (sw.mean[i] + sw.sigma * sum ) );
		}

		for (int i = 0; i < N; ++i)
			rgdTmp[i] = sw.D[i] * 0;
		printf("\nD (sample) (* 0) : ");
		for (int i = 0; i < N; ++i){
			double sum = 0.0;
			for (int j = 0; j < N; ++j)
				sum += sw.B[i][j] * rgdTmp[j];
			if((sw.mean[i] + sw.sigma * sum ) >= 0 )
				printf(" ");
			printf("%.3f ", (sw.mean[i] + sw.sigma * sum ) );
		}

		for (int i = 0; i < N; ++i)
			rgdTmp[i] = sw.D[i] * -3;
		printf("\nD (sample) (* -3): ");
		for (int i = 0; i < N; ++i){
			double sum = 0.0;
			for (int j = 0; j < N; ++j)
				sum += sw.B[i][j] * rgdTmp[j];
			if((sw.mean[i] + sw.sigma * sum ) >= 0 )
				printf(" ");
			printf("%.3f ", (sw.mean[i] + sw.sigma * sum ) );
		}
	}
	

	/*************************/

	double previousSol[N];
	if(sw.getSize() == 1)
		memcpy(previousSol, sw.particles[0].solution.decisionVector, sizeof(double)*N);

	//original sampling
	for (int iNk = 0; iNk < sw.getSize(); ++iNk){ /* generate scaled cmaes_random vector (D * z) */
		for (int i = 0; i < N; ++i)
			rgdTmp[i] = sw.D[i] * cmaes_random_Gauss();
		/* add mutation (sigma * B * (D*z)) */
		for (int i = 0; i < N; ++i) {
			double sum = 0.0;
			for (int j = 0; j < N; ++j)
				sum += sw.B[i][j] * rgdTmp[j];

			double value=sw.mean[i] + sw.sigma * sum;//normalized new solution
			sw.particles[iNk].solution.evalSolution=true;
			sw.particles[iNk].solution.decisionVector[i] = (value*(superiorPositionLimit[i]-inferiorPositionLimit[i]))+inferiorPositionLimit[i]; //denormalize the solution back to the original variable bounds
		}
	}
	
	/* Test if function values are identical, escape flat fitness */ //comment kept from the original CMA-ES code
	//Check whether the sampled solutions are all equal (or, with a single particle, equal to the previous solution); if so, inflate sigma to escape the flat region
	if(sw.getSize() == 1){
		if(isEqual(previousSol, sw.particles[0].solution.decisionVector, N)){
			sw.sigma *= exp(0.2+cSigma/dSigma);
			fprintf(stderr, "Warning: sigma increased due to equal decision vectors.\n");
			return false;
		}
	}else{
		if(isEqual(sw.particles[0].solution.decisionVector, sw.particles[sw.getSize()-1].solution.decisionVector, N)){
			sw.sigma *= exp(0.2+cSigma/dSigma);
			fprintf(stderr, "Warning: sigma increased due to equal decision vectors.\n");
			return false;
		}
	}

	return true;
} /* mySample() */
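
Both sampling functions read the covariance determinant off the eigendecomposition. A small worked check of that identity on a symmetric 2x2 matrix (the numbers are purely illustrative):

#include <cmath>
#include <cstdio>

int main() {
    // Symmetric 2x2 covariance matrix [[a, b], [b, c]].
    double a = 2.0, b = 0.5, c = 1.0;
    // Closed-form eigenvalues of a symmetric 2x2 matrix.
    double tr = a + c, disc = std::sqrt((a - c)*(a - c) + 4*b*b);
    double l1 = (tr + disc) / 2, l2 = (tr - disc) / 2;
    // The determinant equals both a*c - b*b and the product of the eigenvalues (1.75 here).
    printf("det = %f, eigenvalue product = %f\n", a*c - b*b, l1*l2);
    // A NaN or infinite log-determinant is the signal the code above uses to reset CMA-ES.
    printf("log det = %f\n", std::log(l1*l2));
    return 0;
}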
Example #4
void KPCA::setEigens(const double* values, const double* vectors, size_t count) {
    eigens.clear();
    // Eigenvectors are stored contiguously in row-major order: vector i starts at vectors + i*dim.
    for (size_t i = 0; i < count; i++) {
        eigens.push_back(Eigen(values[i], vectors + i*dim, dim));
    }
}
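
A minimal sketch of the flat, row-major eigenvector layout that setEigens expects; the data here is purely illustrative:

#include <cstdio>
#include <vector>

int main() {
    // Flat storage: eigenvector i occupies the range [i*dim, (i+1)*dim).
    size_t dim = 3;
    std::vector<double> values  = {2.0, 0.5};
    std::vector<double> vectors = {1, 0, 0,   // eigenvector 0
                                   0, 1, 0};  // eigenvector 1
    for (size_t i = 0; i < values.size(); i++) {
        const double* v = vectors.data() + i*dim; // same pointer arithmetic as setEigens
        printf("lambda = %g, v = (%g, %g, %g)\n", values[i], v[0], v[1], v[2]);
    }
    return 0;
}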