/**
 *   Function built to simplify the training of a network. It uses predefined stopping
 * criteria: the goal is to halt training once the network's mean squared error over the
 * samples stops decreasing. It receives a parameter giving a minimum number of trainings,
 * after which the variation of the mean squared error starts being checked, and the number
 * of trainings to run between successive error measurements. When the relative standard
 * deviation (standard deviation divided by the mean) of the last n measurements is less
 * than or equal to a given value (between 0 and 1), training is halted.
 *   The function also receives the set of samples (input matrix/output matrix), the number
 * of samples contained in those matrices, the dimension of each input and output sample,
 * and a flag indicating whether the samples are trained in random or sequential order.
 */
int BKPNeuralNet::AutoTrain( float**inMatrix, float **outMatrix, int inSize, int outSize, int nSamples, 
              int minTrains, int varVectorSize, float minStdDev, int numTrains, TrainType type, 
              float l_rate, float momentum, int* retExecutedTrains )
{
  // Error-return cases:
  if( (!inMatrix) || (!outMatrix) || (inSize!=_nLayers[0]) || (_nLayers[_layers-1]!=outSize) )
    return -1;

  // The initial number of executed trainings must be at least 0:
  if( *retExecutedTrains < 0 )
    *retExecutedTrains = 0;

  int thisSample = -1;    //< Auxiliary variable: index of the sample to be trained.
  // Run the mandatory trainings:
  for( int i=0 ; i<minTrains ; i++ )
  {
    if( type == ORDERED_TRAIN )
      thisSample = (thisSample + 1) % nSamples;
    if( type == RANDOM_TRAIN )
      thisSample = RandInt(0, (nSamples-1));
    Train( inSize, inMatrix[thisSample], outSize, outMatrix[thisSample], l_rate, momentum );
  }

  // Run the remaining trainings:
  float* varVector = new float[varVectorSize];  //< Vector holding the last error measurements.
  int ptVarVector = 0;              //< Points to the first empty position of varVector.
  float lastVariance = (float)MAX_VALUE;   //< Holds the variance of the last measurements.
  float StdDev = (float)MAX_VALUE;   //< Holds the relative standard deviation (std. dev. / mean).
  thisSample = -1;
  int nTrains=minTrains + *retExecutedTrains;  //< Number of trainings executed so far.
  bool varFlag = false;
  while( StdDev > minStdDev )
  {
    if( type == ORDERED_TRAIN )
      thisSample = (thisSample + 1) % nSamples;
    if( type == RANDOM_TRAIN )
      thisSample = RandInt(0, (nSamples-1));
    Train( inSize, inMatrix[thisSample], outSize, outMatrix[thisSample], l_rate, momentum );
    if( (nTrains%numTrains) == 0 ) //< Every numTrains trainings, measure the error:
    {
      float retRMS_Error = 0;
      float mean = 0;
      RMS_error( inMatrix, outMatrix, inSize, outSize, nSamples, &retRMS_Error );
      varFlag = ShiftLeft( varVector, varVectorSize, retRMS_Error, ptVarVector );
      if( varFlag == true )
      {
        lastVariance = Variance( varVector, varVectorSize, &mean );
        StdDev = ((float)sqrt(lastVariance))/mean;
      }
      ptVarVector++;
    }
    nTrains++;
    if( nTrains >= 90000 )   //< Hard cap: stop after 90000 trainings.
      StdDev = minStdDev;

  }
  delete[] varVector;  // Release the error-measurement buffer.
  *retExecutedTrains = nTrains;
  return 0;
}
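A minimal usage sketch of the routine above, assuming a BKPNeuralNet instance `net` whose first and last layer sizes match the sample matrices; all numeric arguments are illustrative values, not recommendations:

// Hypothetical call: `net`, the sample matrices and their dimensions are assumed
// to have been prepared elsewhere with the layout AutoTrain expects.
int executedTrains = 0;
int status = net.AutoTrain( inMatrix, outMatrix, nInputs, nOutputs, nSamples,
                            1000,          // mandatory trainings before the error check starts
                            10,            // keep the last 10 RMS-error measurements
                            0.01f,         // stop when their relative std. dev. drops to 1%
                            50,            // measure the RMS error every 50 trainings
                            RANDOM_TRAIN,  // pick samples at random
                            0.3f, 0.7f,    // learning rate, momentum
                            &executedTrains );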
Example #2
RNN<LayerTypes, OutputLayerType, InitializationRuleType, PerformanceFunction
>::RNN(LayerType &&network,
       OutputType &&outputLayer,
       const arma::mat& predictors,
       const arma::mat& responses,
       InitializationRuleType initializeRule,
       PerformanceFunction performanceFunction) :
    network(std::forward<LayerType>(network)),
    outputLayer(std::forward<OutputType>(outputLayer)),
    performanceFunc(std::move(performanceFunction)),
    inputSize(0),
    outputSize(0)
{
  static_assert(std::is_same<typename std::decay<LayerType>::type,
                  LayerTypes>::value,
                  "The type of network must be LayerTypes.");

  static_assert(std::is_same<typename std::decay<OutputType>::type,
                OutputLayerType>::value,
                "The type of outputLayer must be OutputLayerType.");

  initializeRule.Initialize(parameter, NetworkSize(this->network), 1);
  NetworkWeights(parameter, this->network);

  Train(predictors, responses);
}
NaiveBayesClassifier<MatType>::NaiveBayesClassifier(
    const MatType& data,
    const arma::Row<size_t>& labels,
    const size_t classes,
    const bool incremental) :
    trainingPoints(0) // Set when we call Train().
{
  const size_t dimensionality = data.n_rows;

  // Initialize the model to zero when Train() won't do it for us (it won't
  // when we're using the incremental algorithm), then perform training.
  if (incremental)
  {
    probabilities.zeros(classes);
    means.zeros(dimensionality, classes);
    variances.zeros(dimensionality, classes);
  }
  else
  {
    probabilities.set_size(classes);
    means.set_size(dimensionality, classes);
    variances.set_size(dimensionality, classes);
  }
  Train(data, labels, incremental);
}
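A hypothetical construction call for the snippet above, following the usual mlpack/Armadillo layout of one column per data point and one row per dimension; the include path, data, and labels below are illustrative assumptions:

#include <mlpack/methods/naive_bayes/naive_bayes_classifier.hpp>

int main()
{
  // 3-dimensional points, 100 of them, 2 classes, incremental updates enabled.
  arma::mat data(3, 100, arma::fill::randu);
  arma::Row<size_t> labels(100);
  for (size_t i = 0; i < labels.n_elem; ++i)
    labels[i] = i % 2;  // alternate between the two class labels

  mlpack::naive_bayes::NaiveBayesClassifier<> nbc(data, labels, 2, true);
  return 0;
}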
xml::XMLElement* DecisionTree::loadXMLSettings(xml::XMLElement* elem)
{
	xml::XMLElement*e;
	e=elem->getSubElement(mT("scheme"));
	if(e)
	{
		if(m_scheme)
			delete m_scheme;
		m_scheme=new AttributesScheme();
		m_scheme->loadXMLSettings(e);
	}
	e=elem->getSubElement(mT("training"));
	if(e)
	{
		xml::XMLAttribute*attr=e->getAttribute(mT("Target"));
		if(!attr)
			return elem;
		TrainingDataSet ds(m_scheme);
		ds.loadExamplesFromXML(e);
		Train(&ds,attr->value);

		// Test: export the trained tree and write it back out to IDT.xml.
		xml::XMLElement dtRoot(mT("DT"));
		exportXMLSettings(&dtRoot);
		GCPtr<OS::IStream> stream=gFileSystem.openFile(mT("IDT.xml"),OS::TXT_WRITE);
		xml::XMLWriter w;
		w.addElement(&dtRoot);
		OS::StreamWriter ww(stream);
		ww.writeString(w.flush());
		stream->close();

	}
	return elem;
}
/* Main function */
int main(int argc, char** argv){
	char* trainingFile, * trainingTargetFile, * testingFile;
	double duration = 0;

	verbose = 0;

	if(getenv("VERBOSE") != 0)
	{
		verbose = atoi(getenv("VERBOSE"));
	}

	/* make sure all ten expected command-line arguments are present */
	if(argc < 11){
		fprintf(stderr, "Usage: %s numInputNodes numOutputNodes numTrainingSamples numTestSamples numHiddenLayers neuronsPerLayer learningRate testingFile trainingFile trainingTargetFile\n", argv[0]);
		return 1;
	}

	/* read num inputs/outputs nodes */
	numInputNodes = atoi(argv[1]);

	numOutputNodes = atoi(argv[2]);

	numTrainingSamples = atoi(argv[3]);

	numTestSamples = atoi(argv[4]);

	/* read the number of Hidden layers in net */
	numHiddenLayers = atoi(argv[5]);

	neuronsPerLayer = atoi(argv[6]);

	/* read learning rate */
	learningRate = atof(argv[7]);

	/* read testing data file */
	testingFile = argv[8];

	/* read training data file */
	trainingFile = argv[9];

	/* read training target data  */
	trainingTargetFile = argv[10];

	/* initialize the neural network */
	InitNeuralNet();
	InitSamples(numTrainingSamples, numTestSamples);

	ReadFile(trainingFile, numInputNodes, numTrainingSamples, trainingSamples);
	ReadFile(trainingTargetFile, numOutputNodes, numTrainingSamples, trainingTargets);
	ReadFile(testingFile, numInputNodes, numTestSamples, testSamples);


	/* train the neural network */
	timerStart();

	Train();

	duration = timerStop();
	printf("Duration: %f seconds\n", (duration));

	Test();

	cleanUp();

	return 0;
}
LogisticRegression<MatType>::LogisticRegression(
    OptimizerType<LogisticRegressionFunction<MatType>>& optimizer) :
    parameters(optimizer.Function().GetInitialPoint()),
    lambda(optimizer.Function().Lambda())
{
  Train(optimizer);
}
Example #7
File: trainer.hpp  Project: 0x0all/mlpack
    void Train(InputType& trainingData,
               OutputType& trainingLabels,
               InputType& validationData,
               OutputType& validationLabels)
    {
      // This generates [0 1 2 3 ... (ElementCount(trainingData) - 1)]. The
      // sequence will be used to iterate through the training data.
      index = arma::linspace<arma::Col<size_t> >(0,
          ElementCount(trainingData) - 1, ElementCount(trainingData));
      epoch = 0;

      while(true)
      {
        if (shuffle)
          index = arma::shuffle(index);

        Train(trainingData, trainingLabels);
        Evaluate(validationData, validationLabels);

        if (validationError <= tolerance)
          break;

        if (maxEpochs > 0 && ++epoch >= maxEpochs)
          break;
      }
    }
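The loop above is a plain validation-based early-stopping scheme: optionally shuffle, train for one epoch, evaluate on held-out data, and stop once the validation error reaches the tolerance or the epoch budget runs out. A minimal standalone sketch of the same control flow, using hypothetical trainOneEpoch/validate callbacks rather than the mlpack trainer API:

#include <cstddef>
#include <functional>

// Returns the number of completed epochs; stops when the validation error
// reaches `tolerance` or after `maxEpochs` epochs (0 means no epoch limit).
std::size_t TrainWithEarlyStopping(const std::function<void()>& trainOneEpoch,
                                   const std::function<double()>& validate,
                                   const double tolerance,
                                   const std::size_t maxEpochs)
{
  std::size_t epoch = 0;
  while (true)
  {
    trainOneEpoch();                            // one pass over the training data
    const double validationError = validate();  // error on the held-out set

    if (validationError <= tolerance)
      break;
    if (maxEpochs > 0 && ++epoch >= maxEpochs)
      break;
  }
  return epoch;
}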
Example #8
void Train() {
	clock_t t;
	t = clock();
	for (int index = 0 ; index < featureSize ; index++)
		Train(index);
	t = clock() - t;
	journal << "Computing classifiers locally " << negCount + posCount << " times: " << ((float)t)/CLOCKS_PER_SEC << "seconds.\n";
}
Example #9
AdaBoost<MatType, WeakLearner>::AdaBoost(
    const MatType& data,
    const arma::Row<size_t>& labels,
    const WeakLearner& other,
    const size_t iterations,
    const double tol)
{
  Train(data, labels, other, iterations, tol);
}
Example #10
SoftmaxRegression<OptimizerType>::SoftmaxRegression(
    OptimizerType<SoftmaxRegressionFunction>& optimizer) :
    parameters(optimizer.Function().GetInitialPoint()),
    numClasses(optimizer.Function().NumClasses()),
    lambda(optimizer.Function().Lambda()),
    fitIntercept(optimizer.Function().FitIntercept())
{
  Train(optimizer);
}
LogisticRegression<MatType>::LogisticRegression(
    const MatType& predictors,
    const arma::Row<size_t>& responses,
    const double lambda) :
    parameters(arma::zeros<arma::vec>(predictors.n_rows + 1)),
    lambda(lambda)
{
  Train(predictors, responses);
}
Example #12
	int OnlineSVR::Train (double**X, double *Y, int ElementsNumber, int ElementsSize)
	{	
		Matrix<double>* NewX = new Matrix<double>(X, ElementsNumber, ElementsSize);
		Vector<double>* NewY = new Vector<double>(Y, ElementsNumber);
		int Flops = Train(NewX,NewY);
		delete NewX;
		delete NewY;
		return Flops;
	}
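A hypothetical call of the overload above; it only illustrates the expected array shapes (ElementsNumber rows of ElementsSize features plus one target per row) and assumes an OnlineSVR instance configured elsewhere:

// Assumes the OnlineSVR class from this project is visible here.
int TrainToyProblem(OnlineSVR& svr)
{
	// Illustrative data: 4 samples, 2 features each (an XOR-like toy set).
	double x0[] = {0.0, 0.0}, x1[] = {0.0, 1.0}, x2[] = {1.0, 0.0}, x3[] = {1.0, 1.0};
	double* X[] = {x0, x1, x2, x3};
	double  Y[] = {0.0, 1.0, 1.0, 0.0};

	return svr.Train(X, Y, 4, 2);  // returns the reported flops count
}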
Example #13
//=================================================================================================
void PlayerController::TrainMove(float dt, bool run)
{
	move_tick += (run ? dt : dt / 10);
	if(move_tick >= 1.f)
	{
		move_tick -= 1.f;
		Train(TrainWhat::Move, 0.f, 0);
	}
}
Example #14
//=================================================================================================
void PlayerController::Rest(int days, bool resting, bool travel)
{
	// update effects that act over multiple days, end the others
	int best_nat;
	float prev_hp = unit->hp,
		prev_stamina = unit->stamina;
	unit->EndEffects(days, &best_nat);

	// regenerate hp
	if(unit->hp != unit->hpmax)
	{
		float heal = 0.5f * unit->Get(Attribute::END);
		if(resting)
			heal *= 2;
		if(best_nat)
		{
			if(best_nat != days)
				heal = heal*best_nat * 2 + heal*(days - best_nat);
			else
				heal *= 2 * days;
		}
		else
			heal *= days;

		heal = min(heal, unit->hpmax - unit->hp);
		unit->hp += heal;

		Train(Attribute::END, int(heal));
	}

	// send update
	Game& game = Game::Get();
	if(Net::IsOnline() && !travel)
	{
		if(unit->hp != prev_hp)
		{
			NetChange& c = Add1(Net::changes);
			c.type = NetChange::UPDATE_HP;
			c.unit = unit;
		}

		if(unit->stamina != prev_stamina && this != game.pc)
			game.GetPlayerInfo(this).update_flags |= PlayerInfo::UF_STAMINA;
	}

	// reset last damage
	last_dmg = 0;
	last_dmg_poison = 0;
	dmgc = 0;
	poison_dmgc = 0;

	// reset action
	action_cooldown = 0;
	action_recharge = 0;
	action_charges = GetAction().charges;
}
LogisticRegression<MatType>::LogisticRegression(
    const MatType& predictors,
    const arma::Row<size_t>& responses,
    OptimizerType& optimizer,
    const double lambda) :
    parameters(arma::rowvec(predictors.n_rows + 1, arma::fill::zeros)),
    lambda(lambda)
{
  Train(predictors, responses, optimizer);
}
MachineABC* LinearRegressorTrainer::Train(DataSet* pData)
{
	RegressorTrainerABC::SetData (pData);
	LinearRegressor* pRegressor=new LinearRegressor;
	if (!Train (pRegressor)){
		delete pRegressor;
		pRegressor=0;
	}
	return pRegressor;
}
Example #17
File: layer.cpp  Project: bin3/toyml
bool Layer::Train(const NNetData& data) {
  for (std::size_t iter = 0; iter < options_.max_iterations; ++iter) {
    VLOG(2) << "iteration#" << iter;
    for (std::size_t i = 0; i < data.size(); ++i) {
      VLOG(4) << "instance#" << i;
      Train(data.input(i), data.label(i));
    }
  }
  return true;
}
LogisticRegression<MatType>::LogisticRegression(
    const MatType& predictors,
    const arma::Row<size_t>& responses,
    const arma::vec& initialPoint,
    const double lambda) :
    parameters(initialPoint),
    lambda(lambda)
{
  Train(predictors, responses);
}
Example #19
double SoftmaxRegression<OptimizerType>::Train(const arma::mat& data,
                                               const arma::Row<size_t>& labels,
                                               const size_t numClasses)
{
  SoftmaxRegressionFunction regressor(data, labels, numClasses,
                                      lambda, fitIntercept);
  OptimizerType<SoftmaxRegressionFunction> optimizer(regressor);

  return Train(optimizer);
}
Example #20
/*****************************************************************************************
 * int K_MeansPredict::k_FoldXV( const vector< vector< float > >& Ex, const float stopDist, const int stopIter )
 * purpose:
 *  performs k-fold cross validation using training examples
 *
 * 03.06.2006	djh	eliminated reassignment of _totalErrMean
 * 03.07.2006	djh	changed this_model_err += err;
 *                      to this_model_err += pow(err,2);
 *                      because old way allowed model with large negative error
 *                      to be selected as best
 * 03.08.2006	djh	replaced _totalUpper/_totalLowerConfBound with _totalBoundStub
 *****************************************************************************************/
 int K_MeansPredict::k_FoldXV( const vector< vector< float > >& Ex, const float stopDist, const int stopIter ){
  // number of folds
  int nFolds = 10;
  //Divide Examples into folds
  int chunk = Ex.size()/nFolds;
  float sum_err=0.;
  float sum_err2=0.;
  K_MeansPredict bestModel;
  float best_model_err;
  for( int i=0; i<nFolds; i++ ){
    cout << "# Fold " << i << endl;
    // Don't have to reset means, Train does that for us
    vector< vector< float > > TestSet;
    vector< vector< float > > TrainSet;
    float this_model_err=0;
    for( int j=0; j<Ex.size(); j++ ){
      if( j>=i*chunk  && j<(i+1)*chunk ){
        //cout << "Example " << j << "added to test set\n";
        TestSet.push_back( Ex[j] );
      }
      else{
        TrainSet.push_back( Ex[j] );
        //cout << "Example " << j << "added to train set\n";
      }
    }
    Train( TrainSet, stopDist, stopIter, 0 );
    for( int j=0; j<TestSet.size(); j++ ){
      float err = EvaluatePattern( TestSet[j] )-TestSet[j][0]; // first element is target value
      this_model_err += pow(err,2);
      sum_err += err;
      sum_err2 += pow( err,2 );
    }
    if( i==0 || this_model_err < best_model_err ){
      best_model_err = this_model_err;
      bestModel = *this;
    }
  }
  *this = bestModel;
  //bestPercep.Output( cout );
  //_totalErrMean = sum_err/( Ex.size() );
  // Sample variance via the shortcut formula: (sum of squares - (sum)^2/N)/(N-1).
  float errVar = (sum_err2 - (sum_err*sum_err/float(Ex.size())))/float(Ex.size()-1);
  cout << "# 10-fold x-validation:\n";
  cout << "#   Error:\n";
  cout << "#     Mean of Squared Errors (MSE) is : " << sum_err2/(float(Ex.size()) ) << endl; // 01/07/2006
  cout << "#     Error Mean is : " << _totalErrMean << endl;
  cout << "#     Error variance is: " << errVar << endl;
  float t_val = 1.960; // value t_(alpha/2,v-1) for alpha = 0.05 v=large
                       // this gives the t value for a 95% confidence 
  //_totalLowerConfBound = _totalErrMean - t_val*sqrt( errVar * (1.0+( 1.0/float(Ex.size()) )) );
  //_totalUpperConfBound = _totalErrMean + t_val*sqrt( errVar * (1.0+( 1.0/float(Ex.size()) )) );
  _totalBoundStub = sqrt( errVar * (1.0+( 1.0/float(Ex.size()) )) );
  return 0;
}
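In the fold split above, example j goes to the test set of fold i exactly when i*chunk <= j < (i+1)*chunk; since chunk = Ex.size()/nFolds truncates, the last Ex.size() % nFolds examples are only ever used for training. A tiny standalone illustration of that index split (not part of K_MeansPredict):

#include <cstdio>
#include <vector>

int main()
{
  const int nExamples = 23, nFolds = 10;
  const int chunk = nExamples / nFolds;  // = 2, so examples 20..22 are never tested
  for (int i = 0; i < nFolds; ++i)
  {
    std::vector<int> testSet, trainSet;
    for (int j = 0; j < nExamples; ++j)
    {
      if (j >= i * chunk && j < (i + 1) * chunk)
        testSet.push_back(j);
      else
        trainSet.push_back(j);
    }
    std::printf("fold %d: test indices [%d, %d), training-set size %zu\n",
                i, i * chunk, (i + 1) * chunk, trainSet.size());
  }
  return 0;
}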
Example #21
Perceptron<LearnPolicy, WeightInitializationPolicy, MatType>::Perceptron(
    const Perceptron& other,
    const MatType& data,
    const arma::Row<size_t>& labels,
    const arma::rowvec& instanceWeights) :
    maxIterations(other.maxIterations)
{
  // Initialize the weights and biases for this data dimensionality and class count.
  WeightInitializationPolicy wip;
  wip.Initialize(weights, biases, data.n_rows, other.NumClasses());

  Train(data, labels, instanceWeights);
}
Example #22
Perceptron<LearnPolicy, WeightInitializationPolicy, MatType>::Perceptron(
    const MatType& data,
    const arma::Row<size_t>& labels,
    const size_t numClasses,
    const size_t maxIterations) :
    maxIterations(maxIterations)
{
  WeightInitializationPolicy wip;
  wip.Initialize(weights, biases, data.n_rows, numClasses);

  // Start training.
  Train(data, labels);
}
Example #23
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    this->faceDet = new facedetect();
    this->recognize = new facerecognize();

    QObject::connect(ui->PhotoDet_action, SIGNAL(triggered()), this, SLOT(PhotoDetec()));
    QObject::connect(ui->Video_action, SIGNAL(triggered()), this, SLOT(VideoDetec()));
    QObject::connect(ui->Train_action, SIGNAL(triggered()), this, SLOT(Train()));
    QObject::connect(ui->Recog_action, SIGNAL(triggered()), this, SLOT(PhotoRecognize()));
}
Example #24
int main(int argc, char** argv) {
    if (!strcmp(argv[1], "--train") && argc == 4) {
        Train(argv[2], argv[3]);
        return EXIT_SUCCESS;
    }

    if (!strcmp(argv[1], "--interactive") && argc == 3) {
        Interactive(argv[2]);
        return EXIT_SUCCESS;
    }

    std::cerr << "Usage: " << argv[0] << " --train <dataset> <model>\n";
    std::cerr << "Usage: " << argv[0] << " --interactive <model>\n";
    return EXIT_FAILURE;
}
Example #25
LocalCoordinateCoding::LocalCoordinateCoding(
    const arma::mat& data,
    const size_t atoms,
    const double lambda,
    const size_t maxIterations,
    const double tolerance,
    const DictionaryInitializer& initializer) :
    atoms(atoms),
    lambda(lambda),
    maxIterations(maxIterations),
    tolerance(tolerance)
{
  // Train the model.
  Train(data, initializer);
}
Example #26
SoftmaxRegression<OptimizerType>::SoftmaxRegression(const arma::mat& data,
                                                    const arma::Row<size_t>& labels,
                                                    const size_t numClasses,
                                                    const double lambda,
                                                    const bool fitIntercept) :
    numClasses(numClasses),
    lambda(lambda),
    fitIntercept(fitIntercept)
{
  SoftmaxRegressionFunction regressor(data, labels, numClasses,
                                      lambda, fitIntercept);
  OptimizerType<SoftmaxRegressionFunction> optimizer(regressor);

  parameters = regressor.GetInitialPoint();
  Train(optimizer);
}
/* Main function */
int main(int argc, char** argv){
	int** trainingData,**  targetValues;
	char* trainingFile, * trainingTargetFile, * testingFile;

	/* make sure all nine expected command-line arguments are present */
	if(argc < 10)
		return 1;

	/* read num inputs/outputs nodes */
	neuronsPerLayer = atoi(argv[1]);

	numOutputNodes = atoi(argv[2]);

	numTrainingSamples = atoi(argv[3]);

	numTestSamples = atoi(argv[4]);

	/* read the number of Hidden layers in net */
	numHiddenLayers = atoi(argv[5]);

	/* read learning rate */
	learningRate = atof(argv[6]);

	/* read testing data file */
	testingFile = argv[7];

	/* read training data file */
	trainingFile = argv[8];

	/* read training target data  */
	trainingTargetFile = argv[9];

	/* initialize the neural network */
	InitNeuralNet();
	InitSamples(numTrainingSamples, numTestSamples);

	ReadFile(trainingFile, neuronsPerLayer, numTrainingSamples, trainingSamples);
	ReadFile(trainingTargetFile, numOutputNodes, numTrainingSamples, trainingTargets);
	ReadFile(testingFile, neuronsPerLayer, numTestSamples, testSamples);

	/* train the neural network */
	Train();

	Test();

	return 0;
}
Example #28
void cScenarioBallRL::NewCycleUpdate()
{
	if (EnableTraining())
	{
		// finish recording tuple from previous cycle
		RecordState(mCurrTuple.mStateEnd);
		RecordEndFlags(mCurrTuple);
		mCurrTuple.mReward = CalcReward(mCurrTuple);

		// buffer the tuple; train once the buffer is full
		if (!mFirstCycle)
		{
			mTupleBuffer[mNumTuples] = mCurrTuple;
			++mNumTuples;

			if (mNumTuples == static_cast<int>(mTupleBuffer.size()))
			{
				Train();

				double exp_rate = GetExpRate();
				double exp_temp = GetExpTemp();
				auto& ctrl = mBall.GetController();
				ctrl->SetExpRate(exp_rate);
				ctrl->SetExpTemp(exp_temp);

				printf("\n");
				printf("Exp Rate: %.3f\n", exp_rate);
				printf("Exp Temp: %.3f\n", exp_temp);
			}
		}
		
		// start recording new tuple
		mCurrTuple.mStateBeg = mCurrTuple.mStateEnd;
		RecordAction(mCurrTuple.mAction);
		mCurrTuple.mActionLikelihood = GetActionLikelihood();

		ClearFlags(mCurrTuple);
		RecordBegFlags(mCurrTuple);

		mFirstCycle = false;
	}
}
Example #29
// the actual training regimen
void DoSimulation( void )
{
// reset the network, train patterns and test performance. Repeat for desired number of runs
	cerr << "run: ";
	for ( int run = 0; run < gCALMAPI->CALMGetNumRuns(); run++ )
	{
		*(gCALMAPI->GetCALMLog()) << "\nRUN " << run << endl;
		cerr << run << ' ';

	// start clean
		gCALMAPI->CALMReset( O_WT | O_TIME | O_WIN );

	// record duration of simulation
		gCALMAPI->CALMDuration( kStart );
		*(gCALMAPI->GetCALMLog()) << "\nTRAINING" << endl;

	// train the network on pattern file
		Train();
		
		*(gCALMAPI->GetCALMLog()) << "\nTESTING" << endl;

	// reset winning node information as well as time-delay activations 
		// (the latter only applies if time-delay connections are used)
		gCALMAPI->CALMReset( O_TIME | O_WIN );

	// test the network on pattern file
		Test( run );

	// end time recording, display duration
		gCALMAPI->CALMDuration( kEnd );

	// print out the final weight configuration	
		gCALMAPI->CALMShowWeights();
	}
	cerr << endl;

#if PLOT3D
	// stop 3D plotting
	gCALMAPI->CALMEnd3DPlot();
#endif
}
Example #30
void DriftModel::Record(double next_angle, double angle, double previous_angle, double velocity, double radius, double direction) {
  // If next_angle is (nearly) zero, we probably just started and are not
  // drifting any more; it doesn't make sense to record such entries.
  if (fabs(next_angle) < 1e-5) return;

  if (direction != 0) {
    raw_points_turn_.push_back({angle, previous_angle, velocity, radius, direction, next_angle});
  } else {
    raw_points_straight_.push_back({angle, previous_angle, velocity, radius, direction, next_angle});
  }

  if (!very_ready_) {
    Train();
  }

  double predicted = 0;
  if (IsReady()) {
    predicted = Predict(angle, previous_angle, velocity, radius, direction);
    error_tracker_.Add(predicted, next_angle);
  }
}