double ArtificialNeuralNetwork::train(const std::vector<double>& dataInputs, const std::vector<double>& dataTargets)
{
    // Load the training example into the input layer and store the targets
    m_inputs[0] = dataInputs;
    m_targets = dataTargets;

    // Forward pass, then accumulate the error at the output and hidden layers
    layerForward();
    double errorTot = 0.0;
    errorTot += computeOutputError();
    errorTot += computeHiddenError();

    // Backpropagate the error and report the total for this example
    adjustWeights();
    return errorTot;
}
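The epoch loop that calls train() is not shown here. A minimal sketch of such a driver, assuming hypothetical names throughout (runTraining, maxEpochs, tolerance, and the two data containers are not part of the original source):

#include <vector>

// Hypothetical driver for train() above; maxEpochs and tolerance are
// assumptions, not part of the original source.
void runTraining(ArtificialNeuralNetwork& ann,
                 const std::vector<std::vector<double>>& trainingSet,
                 const std::vector<std::vector<double>>& targetSet,
                 int maxEpochs, double tolerance)
{
    for (int epoch = 0; epoch < maxEpochs; epoch++)
    {
        // Sum the per-example error over one pass through the data
        double epochError = 0.0;
        for (std::size_t i = 0; i < trainingSet.size(); i++)
            epochError += ann.train(trainingSet[i], targetSet[i]);

        if (epochError < tolerance)   // converged: stop early
            break;
    }
}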
void SOM::training(std::vector< std::vector<double> >& inputData)
{
    for (int epoch = 0; epoch < numEpochs; epoch++)
    {
        for (unsigned int example = 0; example < inputData.size(); example++)
        {
            // Find the best-matching unit for this example, then update the
            // map; the running iteration index drives the decay schedule
            // inside adjustWeights()
            shortestDistance(inputData[example]);
            int index = static_cast<int>( (epoch * inputData.size()) + example );
            adjustWeights(index, inputData[example]);
        }

        lastSeason++;

        // Stop early once the winning distance (set by shortestDistance)
        // falls below the acceptable error
        if (distance < acceptableError)
            break;
    }
}
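shortestDistance() is not shown in this snippet. A minimal sketch of a Euclidean best-matching-unit search, assuming hypothetical members m_weights (one weight vector per map node) and m_winner; only the member distance appears in the original code:

#include <cmath>
#include <limits>

// Sketch of a best-matching-unit search for the SOM above. m_weights and
// m_winner are assumptions, not names from the original source.
void SOM::shortestDistance(const std::vector<double>& input)
{
    distance = std::numeric_limits<double>::max();
    for (std::size_t node = 0; node < m_weights.size(); node++)
    {
        double d = 0.0;
        for (std::size_t k = 0; k < input.size(); k++)
        {
            double diff = input[k] - m_weights[node][k];
            d += diff * diff;
        }
        d = std::sqrt(d);
        if (d < distance)   // keep the closest node as the winner
        {
            distance = d;
            m_winner = node;
        }
    }
}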
Example #3
/// <summary>
/// Perform learning (weight adjustments) for all units on the topological map
/// </summary>
void Ttopmap::learn()
{
  int x, y, i;
  float d, value, max;

  // only learn if a winning unit has been found
  if (WinnerX > -1)
  {
    // squared excitation radius, used to normalise the distance below
    max = (float)(RadiusExcite * RadiusExcite);

    for (x = (WinnerX - RadiusExcite); x <= (WinnerX + RadiusExcite); x++)
    {
      for (y = (WinnerY - RadiusExcite); y <= (WinnerY + RadiusExcite); y++)
      {
        // skip neighbours that fall outside the map
        if ((x >= 0) && (x < map_width) && (y >= 0) && (y < map_height))
        {
          d = Dist(x, y) / (2 * max);
          if (d < 1)
          {
            value = randVal(d);
            adjustWeights(x, y, value);

            // pull the neighbour's classification counts towards the
            // winner's, scaled by proximity and the learning rate
            if (!((x == WinnerX) && (y == WinnerY)))
            {
              for (i = 0; i < 10; i++)
                classificationMulti[x][y][i] += (int)((classificationMulti[WinnerX][WinnerY][i] - classificationMulti[x][y][i]) * (1 - d) * learningRate);
            }
          }
        }
      }
    }

    // store the current input image on the winning unit
    for (x = 0; x < inputs_width; x++)
      for (y = 0; y < inputs_height; y++)
        image[WinnerX][WinnerY][x][y] = inputs[x][y];
  }

  // update the threshold based upon the average similarity
  Threshold = (Threshold + (average_similarity * 10)) / 2;
}
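Dist() is not shown either. Given that d = Dist(x, y) / (2 * max) is compared against 1 with max = RadiusExcite squared, Dist() is presumably the squared grid distance from the winner; a hypothetical sketch:

// Hypothetical sketch of the Dist() helper used above: squared grid
// distance of unit (x, y) from the current winning unit.
float Ttopmap::Dist(int x, int y)
{
    int dx = x - WinnerX;
    int dy = y - WinnerY;
    return (float)(dx * dx + dy * dy);
}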
Example #4
void
trainNetwork( FILE *log_file, nn_type *nn, dd_type *data )
{
    /* requires <stdio.h>, <math.h>, <time.h>, and <stdlib.h> (which
       declares drand48/srand48); determineOutput() and adjustWeights()
       should be declared in the project headers rather than here */
    int     i, j, p;
    int     epoch, errors, misclassified;
    
    srand48( time(0) );
    
    /*
     *  Initialize weights and thresholds to small random values in
     *  the range (-0.5, 0.5). I use the convention that -W1[0][j]
     *  and -W2[0][j] are the thresholds for the hidden units and
     *  the output units, respectively.
     */
    for( i = 0; i <= nn->n_input; i++ )
        for( j = 1; j <= nn->n_hidden; j++ ) {
            nn->W1[i][j] = drand48() - 0.5;
        }
    
    for( i = 0; i <= nn->n_hidden; i++ )
        for( j = 1; j <= nn->n_output; j++ ) {
            nn->W2[i][j] = drand48() - 0.5;
        }
    
    nn->h[0] = 1.0;
    
    epoch = 0;
    do {
        epoch++;
        errors = 0;
        
        /*
         *  Loop through training data (one epoch).
         */
        for( p = 0; p < data->n; p++ ) {
            
            /*
             *  Determine the actual output of the network
             *  for the given example x[p].
             */
            determineOutput( nn, data->x[p] );
            
            /*
             *  Determine whether this example is misclassified by
             *  comparing the desired output with the actual output.
             *  YOU MAY WISH TO CHANGE THIS.
             */
            misclassified = 0;
            for( j = 1; j <= nn->n_output; j++ ) {
                if( fabs( nn->o[j] - data->y[p][j] ) > ACCEPT_TRAIN ) {
                    errors++;
                    misclassified = 1;
                    break;
                }
            }
            
            /*
             *  Determine how to adjust weights to reduce sum
             *  of squared error for the given example x[p], y[p].
             *  (Testing the cumulative 'errors' count here would keep
             *  adjusting for the rest of the epoch after the first
             *  mistake; test this example's own result instead.)
             */
            if( misclassified ) {
                adjustWeights( nn, data->x[p], data->y[p] );
            }
        }
        
        fprintf( log_file, "Epoch: %3d,  ", epoch );
        fprintf( log_file, "Number of classification errors: %d (/%d)\n",
                errors, data->n );
        fflush( log_file );
        
        /*
         *  Stopping criteria.
         *  YOU MAY WISH TO CHANGE THIS.
         */
    } while ((errors > MIN_ERRORS) && (epoch < MAX_EPOCH));
    
    fprintf( log_file, "Training epochs = %5d\n", epoch );
    fprintf( log_file, "Classification errors on training data = %d (/%d)\n",
            errors, data->n );
    fflush( log_file );
}
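None of these examples show the body of adjustWeights() itself. A minimal C++ sketch of the standard backpropagation (delta-rule) update for a two-layer network like the one in trainNetwork(), assuming a logistic activation so that o * (1 - o) is its derivative, and ignoring the bias-at-index-0 convention used above; every name here is hypothetical:

#include <vector>

// Sketch of a backpropagation weight update for one (input, target) pair.
// Network, eta, and adjustWeightsSketch are assumptions; this is the
// textbook delta rule, not necessarily the adjustWeights() used above.
struct Network {
    std::vector<std::vector<double>> W1, W2; // input->hidden, hidden->output
    std::vector<double> h, o;                // hidden and output activations
};

void adjustWeightsSketch(Network& nn, const std::vector<double>& x,
                         const std::vector<double>& y, double eta)
{
    // Output-layer deltas: (target - output) * logistic derivative
    std::vector<double> deltaO(nn.o.size());
    for (std::size_t j = 0; j < nn.o.size(); j++)
        deltaO[j] = (y[j] - nn.o[j]) * nn.o[j] * (1.0 - nn.o[j]);

    // Hidden-layer deltas: backpropagate the output deltas through W2
    std::vector<double> deltaH(nn.h.size());
    for (std::size_t i = 0; i < nn.h.size(); i++) {
        double sum = 0.0;
        for (std::size_t j = 0; j < nn.o.size(); j++)
            sum += deltaO[j] * nn.W2[i][j];
        deltaH[i] = sum * nn.h[i] * (1.0 - nn.h[i]);
    }

    // Gradient-descent steps on both weight matrices
    for (std::size_t i = 0; i < nn.h.size(); i++)
        for (std::size_t j = 0; j < nn.o.size(); j++)
            nn.W2[i][j] += eta * deltaO[j] * nn.h[i];

    for (std::size_t i = 0; i < x.size(); i++)
        for (std::size_t j = 0; j < nn.h.size(); j++)
            nn.W1[i][j] += eta * deltaH[j] * x[i];
}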