Example #1
void PiMax::setSensorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_sensors && teaching.getN() == 1);
  // Compute the a_teaching that corresponds to the distal (sensor-space)
  // teaching value, using the inverse of the model A.
  a_teaching = (A.pseudoInverse() * (teaching-b)).mapP(0.95, clip);
  intern_isTeaching=true;
}
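For orientation: the snippet maps a distal teaching signal given in sensor space back into motor space through the pseudoinverse of the forward model A, then bounds the result. A minimal self-contained scalar sketch of that idea, with made-up values and no dependence on the matrix library (std::min/std::max stand in for mapP(0.95, clip)):

#include <algorithm>
#include <cstdio>

int main(){
  double A = 2.0, b = 0.1;  // assumed 1x1 forward model: sensor = A*motor + b
  double x_teach = 0.5;     // desired (distal) sensor value
  double a_teach = (x_teach - b) / A;                   // scalar "pseudoinverse" of A
  a_teach = std::max(-0.95, std::min(0.95, a_teach));   // same role as mapP(0.95, clip)
  std::printf("a_teach = %f\n", a_teach);               // -> 0.2
  return 0;
}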
Example #2
void PiMax::setMotorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_motors && teaching.getN() == 1);
  // Note: because of the clipping, teaching with the old motor value,
  //  which would otherwise have no effect, now drives the output
  //  out of the saturation region.
  a_teaching= teaching.mapP(0.95,clip);
  intern_isTeaching=true;
}
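To see why the clipping matters, consider a motor channel whose output has saturated near ±1: clipping the teaching target at ±0.95 leaves a non-zero teaching error that pulls the output back into the linear range. A self-contained sketch with hypothetical numbers:

#include <algorithm>
#include <cstdio>

int main(){
  double motor_old = 0.999;                                    // current (saturated) motor value
  double target = std::max(-0.95, std::min(0.95, motor_old));  // role of mapP(0.95, clip)
  std::printf("teaching error = %f\n", motor_old - target);    // 0.049, not 0
  return 0;
}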
Example #3
void SosAvgGrad::setC(const matrix::Matrix& _C){
  assert(C.getM() == _C.getM() && C.getN() == _C.getN());
  C=_C;
}
Example #4
void SosAvgGrad::setA(const matrix::Matrix& _A){
  assert(A.getM() == _A.getM() && A.getN() == _A.getN());
  A=_A;
}
Example #5
void SosAvgGrad::setS(const matrix::Matrix& _S){
  assert(S.getM() == _S.getM() && S.getN() == _S.getN());
  S=_S;
}
Example #6
void PiMax::seth(const matrix::Matrix& _h){
  assert(h.getM() == _h.getM() && h.getN() == _h.getN());
  h=_h;
}
Example #7
void PiMax::setA(const matrix::Matrix& _A){
  assert(A.getM() == _A.getM() && A.getN() == _A.getN());
  A=_A;
}
Example #8
void PiMax::setC(const matrix::Matrix& _C){
  assert(C.getM() == _C.getM() && C.getN() == _C.getN());
  C=_C;
}
Example #9
void RandomDyn::setC(const matrix::Matrix& _C){
  assert(C.getM() == _C.getM() && C.getN() == _C.getN());
  C=_C;
}
Example #10
void RandomDyn::seth(const matrix::Matrix& _h){
  assert(h.getM() == _h.getM() && h.getN() == _h.getN());
  h=_h;
}
Example #11
 static void keepMatrixTraceUp(matrix::Matrix& m){
   // Nudge the first (at most two) diagonal entries upward by 0.001 per call
   // as long as they are below 0.8, keeping the trace from collapsing.
   int l = std::min((short unsigned int)2, std::min(m.getM(), m.getN()));
   for(int i=0; i<l; i++){
     if(m.val(i,i)<0.8) m.val(i,i)+=0.001;
   }
 }
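A self-contained restatement of the same trace-nudging idea on a plain 2D array (the identifiers here are illustrative and not part of the library):

#include <algorithm>
#include <cstdio>

static void keepTraceUp(double m[2][2], int rows, int cols){
  int l = std::min(2, std::min(rows, cols));
  for(int i = 0; i < l; i++)
    if(m[i][i] < 0.8) m[i][i] += 0.001;  // nudge each leading diagonal entry upward
}

int main(){
  double C[2][2] = {{0.0, 0.3}, {0.3, 0.0}};
  for(int step = 0; step < 100; step++)
    keepTraceUp(C, 2, 2);
  std::printf("%.3f %.3f\n", C[0][0], C[1][1]);  // 0.100 0.100: the diagonal drifts towards 0.8
  return 0;
}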
Example #12
 virtual void seth(const matrix::Matrix& _h){
   assert(h.getM() == _h.getM() && h.getN() == _h.getN());
   h=_h;
 }
Example #13
 virtual void setC(const matrix::Matrix& _C){
   assert(C.getM() == _C.getM() && C.getN() == _C.getN());
   C=_C;
 }
Example #14
double CuriosityLoop::updatePrediction(const matrix::Matrix& smHist, const matrix::Matrix& s, const matrix::Matrix& m, int phase){
  // Assemble the current sensor-motor vector with a bias entry appended.
  matrix::Matrix sm = s.above(m);
  matrix::Matrix f;
  f.set(1,1);
  f.val(0,0) = 1;
  sm = sm.above(f);

  // 1. Go through the predictions of this predictor, determining the prediction error in each dimension.
  matrix::Matrix error;
  error.set(smHist.getM(), 1);

  prediction_error = 0;
  for(int i = 0; i < prediction.getM(); i++){
    if(pOutput.val(i,0) == 1){
      error.val(i,0) = prediction.val(i,0) - sm.val(i,0);
      prediction_error = prediction_error + pow(error.val(i,0),2);
    } else {
      // This dimension is not predicted and does not count towards the error.
      error.val(i,0) = 0;
    }
  }
  parent_error.val(phase,0) = prediction_error;

  // 2. Change the weights by the delta rule, clamping each weight to [-10, 10].
  for(int i = 0; i < prediction.getM(); i++){         // to
    for(int j = 0; j < predictorWeights.getN(); j++){ // from
      predictorWeights.val(i,j) = predictorWeights.val(i,j) - 0.0001*error.val(i,0)*smHist.val(j,0);
      if(predictorWeights.val(i,j) > 10)
        predictorWeights.val(i,j) = 10;
      else if(predictorWeights.val(i,j) < -10)
        predictorWeights.val(i,j) = -10;
    }
  }
  prediction_error_time_average = 0.9999*prediction_error_time_average + (1-0.9999)*prediction_error;

  // Update the fitness of this predictor based on the instantaneous reduction/increase in prediction error.
  this->fitness = 0.1 + 100*(prediction_error_time_average - old_prediction_error_time_average);
  old_prediction_error_time_average = prediction_error_time_average;

  // TODO: improve the method of determining this gradient later.

  // Update the unrestricted predictor as well, always.
  // 1. Go through the predictions of the unrestricted predictor, determining the prediction error in each dimension.
  matrix::Matrix uError;
  uError.set(smHist.getM(), 1);

  uPrediction_error = 0;
  for(int i = 0; i < uPrediction.getM(); i++){
    if(uPOutput.val(i,0) == 1){
      uError.val(i,0) = uPrediction.val(i,0) - sm.val(i,0);
      uPrediction_error = uPrediction_error + pow(uError.val(i,0),2);
    } else {
      // This dimension is not predicted and does not count towards the error.
      uError.val(i,0) = 0;
    }
  }
  offspring_error.val(phase,0) = uPrediction_error;

  // 2. Change the weights by the delta rule, with the same clamping.
  for(int i = 0; i < uPrediction.getM(); i++){
    for(int j = 0; j < uPredictorWeights.getN(); j++){
      uPredictorWeights.val(i,j) = uPredictorWeights.val(i,j) - 0.0001*uError.val(i,0)*smHist.val(j,0);
      if(uPredictorWeights.val(i,j) > 10)
        uPredictorWeights.val(i,j) = 10;
      else if(uPredictorWeights.val(i,j) < -10)
        uPredictorWeights.val(i,j) = -10;
    }
  }
  // (end of unrestricted-predictor update)

  return this->fitness;
}
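The core of updatePrediction is a delta-rule update with weight clamping plus a slow exponential moving average of the squared error. A self-contained sketch of that update for a single output dimension, with illustrative names and values only:

#include <algorithm>
#include <cstdio>

int main(){
  const int n = 3;
  double w[n] = {0.0, 0.0, 0.0};        // predictor weights for one output
  double input[n] = {1.0, 0.5, -0.25};  // last sensor-motor history vector
  double target = 0.2;                  // current value of the predicted dimension
  double errAvg = 0.0;                  // slow moving average of the squared error

  for(int step = 0; step < 1000; step++){
    double pred = 0;
    for(int j = 0; j < n; j++) pred += w[j]*input[j];
    double err = pred - target;                        // prediction error
    for(int j = 0; j < n; j++){
      w[j] -= 0.0001*err*input[j];                     // delta rule, learning rate 0.0001
      w[j] = std::max(-10.0, std::min(10.0, w[j]));    // clamp as in the original code
    }
    errAvg = 0.9999*errAvg + (1 - 0.9999)*err*err;     // slow exponential average
  }
  std::printf("squared-error average: %f\n", errAvg);
  return 0;
}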
Example #15
void SeMoX::setSensorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_sensors && teaching.getN() == 1);
  // Compute the y_teaching that corresponds to the distal (sensor-space) teaching value, using the inverse of the model A.
  y_teaching = (A.pseudoInverse(0.001) * (teaching-B)).mapP(0.95, clip);
  intern_useTeaching=true;
}
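Example #15 differs from Example #1 mainly in the explicit damping factor passed to pseudoInverse. Assuming the usual damped (Tikhonov-regularised) form of the pseudoinverse, the mapping is, as a sketch:

  y_teaching ~ clip_0.95( (A^T A + 0.001 I)^-1 A^T (teaching - B) )

which keeps the inversion well-conditioned when A is close to singular.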