Example #1
#include <math.h>

#define EPS 1e-7

// Estimate dE/dw for one weight with a central difference:
// (E(w + EPS) - E(w - EPS)) / (2 * EPS).
double derv(double *w) {
  *w += EPS;
  double x = pow(feed_forward() - f(input), 2); // E(w + EPS)
  *w -= 2 * EPS;
  double y = pow(feed_forward() - f(input), 2); // E(w - EPS)
  *w += EPS;                                    // restore the original weight
  return (x - y) / (2 * EPS);
}
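
As a sanity check of the scheme itself, here is a minimal self-contained sketch (hypothetical, not part of the original network code) that applies the same central difference to a function with a known derivative:

#include <cstdio>

#define EPS 1e-7

// g(w) = w * w, so dg/dw = 2 * w; the estimate at w = 3 should be ~6.
static double g(double w) { return w * w; }

// Same central-difference formula as derv() above, for a free function.
static double central_diff(double (*fn)(double), double w) {
  return (fn(w + EPS) - fn(w - EPS)) / (2 * EPS);
}

int main() {
  std::printf("%f\n", central_diff(g, 3.0)); // prints ~6.000000
  return 0;
}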
Example #2
// One synchronous pass over the training set: for each sample, run a
// forward pass, back-propagate the error, and apply a per-sample
// (online) weight update.
void MultilayerPerceptron::train_sync(cv::Mat train_data,
	cv::Mat expected_outputs)
{
	cv::Mat output;
	for(int i = 0; i < train_data.rows; ++i)
	{
		output = feed_forward(train_data.row(i));         // forward pass on sample i
		backpropagation(expected_outputs.row(i), output); // compute the gradients
		update_weights();                                 // gradient step for this sample
	}
}
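
A hypothetical caller for train_sync, using an XOR training set with one sample per row. The MultilayerPerceptron constructor shown here is an assumption; only train_sync comes from the example above:

#include <opencv2/core.hpp>

int main() {
  // four XOR samples, one per row, with one expected output per row
  cv::Mat train_data = (cv::Mat_<float>(4, 2) << 0, 0,
                                                 0, 1,
                                                 1, 0,
                                                 1, 1);
  cv::Mat expected = (cv::Mat_<float>(4, 1) << 0, 1, 1, 0);

  MultilayerPerceptron mlp; // assumed default constructor
  for (int epoch = 0; epoch < 1000; ++epoch)
    mlp.train_sync(train_data, expected);
  return 0;
}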
Example #3
void train(void) {
  int i, j, k, l;
  for (i = 0; i < sizeof trset / sizeof trset[0]; i++) {
    printf("\nTraining Round %d\n", i);

    // feed an input into the network

    input = trset[i];
    feed_forward();
    printe();

    // estimate the gradient of each weight by finite differences and
    // step downhill; new weights go into a copy so that every call to
    // derv() still sees the unmodified network

    Neuron cpy[LAYER_NUM][NEURON_PER_LAYER];
    memcpy(cpy, hlayers, sizeof cpy);

    for (j = 0; j < LAYER_NUM; j++) {
      for (k = 0; k < NEURON_PER_LAYER; k++) {
        if (j == 0) {
          // first hidden layer: a single weight per neuron
          cpy[0][k].weights[0] = hlayers[0][k].weights[0] -
            derv(&hlayers[0][k].weights[0]) * RATE;
        } else {
          for (l = 0; l < NEURON_PER_LAYER; l++) {
            cpy[j][k].weights[l] = hlayers[j][k].weights[l] -
              derv(&hlayers[j][k].weights[l]) * RATE;
          }
        }
      }
    }

    // same scheme for the output neuron

    Neuron outcpy = output;

    for (j = 0; j < NEURON_PER_LAYER; j++) {
      outcpy.weights[j] = output.weights[j] -
        derv(&output.weights[j]) * RATE;
    }

    memcpy(hlayers, cpy, sizeof hlayers); // update weights
    output = outcpy;

    // printw();
  }
}
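
The per-weight update above is plain gradient descent with a fixed step: each new weight is the old one minus RATE times the numerical derivative from derv() in Example #1, and the results are staged in cpy so every derivative is taken against the pre-update weights. A one-line sketch of the rule (gradient_step is a hypothetical name, not in the original code):

// w_new = w - rate * dE/dw: step against the gradient to reduce the error
static double gradient_step(double w, double dE_dw, double rate) {
  return w - rate * dE_dw;
}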