void InvertNChannelControllerHebbH::learnHebb(const matrix::Matrix& context_sensors, const matrix::Matrix& h_update){

  // preprocess context sensors
  Matrix c_sensors = context_sensors;
  for (int i=0; i<number_context_sensors; i++){
    if (c_sensors.val(i,0)<0.15) {
      c_sensors.val(i,0)=0; // IR sensors should only have positive values
    }
  }

  // adapt the Hebbian weights
  for (uint i=0; i<number_motors; i++){
    for (uint j=0; j<(uint)number_context_sensors; j++){
      if (i==j){ // TODO: remove this restriction (it is just for testing)
        // bounded Hebb rule: the (1 - p^2) factor soft-bounds each weight to [-1, 1]
        double dp = eps_hebb * h_update.val(i,0) * c_sensors.val(j,0) * (1 - pow(p.val(i,j),2));
        p.val(i,j) += dp;
      }
    }
  }

}
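For reference, here is a minimal scalar sketch of the weight update applied above (a hypothetical helper, not part of the original class). The (1 - p^2) factor shrinks the step as |p| approaches 1, so each Hebbian weight stays softly bounded in [-1, 1].

// hypothetical scalar version of the bounded Hebb rule used in learnHebb above
inline double boundedHebbStep(double p, double h_update, double sensor, double eps_hebb) {
  // correlation term (h_update * sensor) scaled by a saturation factor that vanishes as |p| -> 1
  return p + eps_hebb * h_update * sensor * (1.0 - p * p);
}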
/**
 * Predicts the update of h based on the current context sensors.
 * @param context_sensors sensor values the prediction is based on
 * @return predicted update of h
 */
matrix::Matrix InvertNChannelControllerHebbH::predictHebb(const matrix::Matrix& context_sensors){
  // preprocess context sensors
  Matrix c_sensors = context_sensors;
  for (int i=0; i<number_context_sensors; i++){
    if (c_sensors.val(i,0)<0.15) {
      c_sensors.val(i,0)=0; // IR sensors should only have positive values
    }
  }


  Matrix pred_h_update(number_motors, 1);
  for (unsigned int k = 0; k < number_motors; k++) {
    pred_h_update.val(k,0) = 0;
  }

  for (uint i=0; i<number_motors; i++){
    for (uint j=0; j<(uint)number_context_sensors; j++){
      // use the thresholded sensor values, consistent with learnHebb
      pred_h_update.val(i,0) += p.val(i,j) * c_sensors.val(j,0);
    }
  }


  return pred_h_update;

}
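For orientation, a minimal self-contained sketch of the prediction step above, using plain arrays instead of matrix::Matrix (all names hypothetical): threshold the IR readings, then compute the matrix-vector product of the Hebbian weights with the preprocessed sensors.

#include <vector>

// hypothetical standalone version of predictHebb: P is the Hebbian weight matrix
// (motors x context sensors), c_sensors the raw context sensor values
std::vector<double> predictHebbSketch(const std::vector< std::vector<double> >& P,
                                      std::vector<double> c_sensors) {
  for (size_t j = 0; j < c_sensors.size(); ++j)
    if (c_sensors[j] < 0.15) c_sensors[j] = 0;     // drop weak IR readings, as above
  std::vector<double> pred(P.size(), 0.0);
  for (size_t i = 0; i < P.size(); ++i)
    for (size_t j = 0; j < c_sensors.size(); ++j)
      pred[i] += P[i][j] * c_sensors[j];           // predicted update of the bias h
  return pred;
}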
Example #3
static void keepMatrixTraceUp(matrix::Matrix& m){
  // slowly pushes the first min(2, M, N) diagonal entries up towards 0.8
  int l = std::min((short unsigned int)2, std::min(m.getM(), m.getN()));
  for(int i=0; i<l; i++){
    if(m.val(i,i)<0.8) m.val(i,i)+=0.001;
  }
}
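A minimal self-contained illustration of what the helper above does, with a plain array standing in for matrix::Matrix (names hypothetical): repeated calls raise the first diagonal entries by 0.001 per call until they reach 0.8.

#include <algorithm>
#include <cstdio>

// hypothetical plain-array version of keepMatrixTraceUp for a square n x n matrix
static void keepTraceUpSketch(double* m, int n) {
  int l = std::min(2, n);
  for (int i = 0; i < l; ++i)
    if (m[i*n + i] < 0.8) m[i*n + i] += 0.001;     // nudge the diagonal upwards
}

int main() {
  double C[4] = {0.0, 0.0, 0.0, 0.0};
  for (int t = 0; t < 1000; ++t) keepTraceUpSketch(C, 2);
  std::printf("%f %f\n", C[0], C[3]);              // both diagonal entries end up at 0.8
  return 0;
}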
Example #4
double CuriosityLoop::updatePrediction(const matrix::Matrix& smHist, const matrix::Matrix& s, const matrix::Matrix& m, int phase){

  // current sensor-motor vector with a constant bias entry (value 1) appended
  matrix::Matrix sm = s.above(m);
  matrix::Matrix f;
  f.set(1,1);
  f.val(0,0) = 1;
  sm = sm.above(f);

  // 1. Go through the predictions of this predictor, determining the prediction error at each dimension.
  matrix::Matrix error;
  error.set(smHist.getM(), 1);

  prediction_error = 0;
  for(int i = 0; i < prediction.getM(); i++){
    if(pOutput.val(i,0) == 1){
      error.val(i,0) = prediction.val(i,0) - sm.val(i,0);
      prediction_error = prediction_error + pow(error.val(i,0),2);
    }
    else{
      // this dimension is not predicted and does not count towards the error
      error.val(i,0) = 0;
    }
  }
  parent_error.val(phase,0) = prediction_error;

  // 2. Change the weights by the delta rule and clip them to [-10, 10].
  for(int i = 0; i < prediction.getM(); i++){         // to
    for(int j = 0; j < predictorWeights.getN(); j++){ // from

      predictorWeights.val(i,j) = predictorWeights.val(i,j) - 0.0001*error.val(i,0)*smHist.val(j,0);

      if(predictorWeights.val(i,j) > 10)
        predictorWeights.val(i,j) = 10;
      else if(predictorWeights.val(i,j) < -10)
        predictorWeights.val(i,j) = -10;
    }
  }

  // low-pass filter of the instantaneous prediction error
  prediction_error_time_average = 0.9999*prediction_error_time_average + (1 - 0.9999)*prediction_error;

  // Update the fitness of this predictor based on the instantaneous reduction/increase of the
  // time-averaged prediction error.
  // TODO: improve the method of determining this gradient later.
  this->fitness = 0.1 + 100*(prediction_error_time_average - old_prediction_error_time_average);
  old_prediction_error_time_average = prediction_error_time_average;

  // Update the unrestricted predictor now as well, always.
  // 1. Go through the predictions of the unrestricted predictor, determining the prediction error at each dimension.
  matrix::Matrix uError;
  uError.set(smHist.getM(), 1);

  uPrediction_error = 0;
  for(int i = 0; i < uPrediction.getM(); i++){
    if(uPOutput.val(i,0) == 1){
      uError.val(i,0) = uPrediction.val(i,0) - sm.val(i,0);
      uPrediction_error = uPrediction_error + pow(uError.val(i,0),2);
    }
    else{
      // this dimension is not predicted and does not count towards the error
      uError.val(i,0) = 0;
    }
  }
  offspring_error.val(phase,0) = uPrediction_error;

  // 2. Change the weights by the delta rule.
  for(int i = 0; i < uPrediction.getM(); i++){
    for(int j = 0; j < uPredictorWeights.getN(); j++){

      uPredictorWeights.val(i,j) = uPredictorWeights.val(i,j) - 0.0001*uError.val(i,0)*smHist.val(j,0);

      if(uPredictorWeights.val(i,j) > 10)
        uPredictorWeights.val(i,j) = 10;
      else if(uPredictorWeights.val(i,j) < -10)
        uPredictorWeights.val(i,j) = -10;
    }
  }

  //************************ UNRESTRICTED PREDICTOR CODE ****************************

  return this->fitness;
}
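The weight updates above are a plain delta rule with hard clipping. For reference, a minimal scalar sketch of a single step (a hypothetical helper, not part of the original class): the weight moves against the gradient of the squared error and is then clipped to [-bound, bound].

#include <algorithm>

// hypothetical scalar version of the clipped delta-rule step used in updatePrediction above
inline double deltaRuleStep(double w, double error, double input,
                            double rate = 0.0001, double bound = 10.0) {
  w -= rate * error * input;                       // gradient step on 0.5 * error^2
  return std::max(-bound, std::min(bound, w));     // clip to [-bound, bound]
}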