Example no. 1
const double Neuron::get_delta(double avg_activation)
{
  static double sparsity_penalty_weight = 3;
  static double sparsity = .05;

  // Backpropagation for a hidden neuron: its delta is the weighted sum of
  // the deltas of the neurons it feeds into.
  double delta = 0;
  for(Synapse *out_synapse : outgoing_synapses) {
    delta += out_synapse->weight * out_synapse->to->delta;
  }

  //printf("%f\n", avg_activation);

  // Disabled: this term appears to be the derivative of a KL-divergence
  // sparsity penalty (as used in sparse autoencoders), pushing the average
  // activation toward the target sparsity.
  /*
  delta += sparsity_penalty_weight *
    (-(sparsity / avg_activation) + ((1 - sparsity) / (1 - avg_activation)));
  */

  // Chain rule: scale by the derivative of this neuron's activation.
  delta *= activation_derivative();
  return delta;
}
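
The snippet above calls activation_derivative() without showing it. Below is a minimal sketch of what it could look like, assuming a logistic (sigmoid) activation whose derivative can be recovered from the cached output a = f(x) as a * (1 - a); this is an assumption for illustration, not the class's actual code.

#include <cmath>

// Logistic activation: f(x) = 1 / (1 + exp(-x)).
double logistic(double x)
{
  return 1.0 / (1.0 + std::exp(-x));
}

// Its derivative, written in terms of the already-computed activation
// a = f(x); a Neuron caching its `activation` could return exactly this.
double logistic_derivative_from_activation(double a)
{
  return a * (1.0 - a);
}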
Example no. 2
void MultilayerPerceptron::backpropagation(cv::Mat expected,
	cv::Mat actual)
{
	// The error signal of the output layer is the difference between the
	// network output and the target.
	cv::Mat error = actual - expected;
	float* error_input_ptr = error.ptr< float >(0);
	float* error_output_ptr = _error_signals.ptr< float >(
		_error_signals.rows - 1);
	for(int i = 0; i < error.cols; ++i)
	{
		error_output_ptr[i] = error_input_ptr[i];
	}
	int* layer_ptr = _layers.ptr< int >(0);
	layer_ptr++; //Skip the input layer.
	// Walk the layers backward, turning each layer's error signal into
	// gradients and accumulating the error signal of the layer below.
	for(int l = static_cast< int >(_weights.size()) - 1; l >= 0; --l)
	{
		float* sum_ptr = _last_sums.ptr< float >(l);
		float* gradient_ptr = _last_gradients.ptr< float >(l);
		error_input_ptr = _error_signals.ptr< float >(l + 1);
		error_output_ptr = _error_signals.ptr< float >(l);
		//Clear the error signal from the previous layer.
		for(int n = 0; n < layer_ptr[l - 1]; ++n)
		{
			error_output_ptr[n] = 0.0f;
		}
		for(int n = 0; n < layer_ptr[l]; ++n)
		{
			float* weight_ptr = _weights[l].ptr< float >(n);
			// Gradient of node n: its error signal times the activation
			// derivative evaluated at the stored pre-activation sum.
			gradient_ptr[n] = error_input_ptr[n]
				* activation_derivative(sum_ptr[n]);
			//Use the number of nodes of the previous layer as the number of inputs.
			for(int w = 0; w < layer_ptr[l - 1]; ++w)
			{
				// Propagate the error backward through the weights.
				error_output_ptr[w] += gradient_ptr[n] * weight_ptr[w];
			}
		}
	}
}
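
The core of the loop above is the standard backpropagation recurrence: each node's gradient is its incoming error signal times the activation derivative at the stored sum, and the error signal of the layer below is the weight-weighted sum of those gradients. Below is a small standalone sketch of that recurrence using std::vector instead of cv::Mat, assuming a logistic activation; all names here are illustrative and are not members of the class above.

#include <cstddef>
#include <cmath>
#include <vector>

// One backward step: given the error signal of layer l, its pre-activation
// sums, and its weight matrix (weights[n][w] connects input w to node n),
// return the error signal of the layer below.
std::vector<double> backpropagate_layer(
    const std::vector<double>& error_in,
    const std::vector<double>& sums,
    const std::vector<std::vector<double>>& weights)
{
  const std::size_t n_inputs = weights.front().size();
  std::vector<double> error_out(n_inputs, 0.0);
  for(std::size_t n = 0; n < error_in.size(); ++n) {
    // Logistic activation derivative at the stored sum: f'(z) = f(z)(1 - f(z)).
    const double fz = 1.0 / (1.0 + std::exp(-sums[n]));
    const double gradient = error_in[n] * fz * (1.0 - fz);
    for(std::size_t w = 0; w < n_inputs; ++w) {
      // Accumulate the error signal of the previous layer.
      error_out[w] += gradient * weights[n][w];
    }
  }
  return error_out;
}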
Example no. 3
const double Neuron::get_delta_for_label(const double label)
{
  // Output-layer delta: the error against the target label, scaled by the
  // derivative of this neuron's activation (chain rule).
  return (label - activation) * activation_derivative();
}
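
Taken together, Example no. 3 seeds the backward pass at the output layer and Example no. 1 carries it through the hidden layers. Note the sign convention: Example no. 3 uses (label - activation) while Example no. 2 uses (actual - expected); the choice only determines whether the weight update adds or subtracts the resulting gradient. Below is a tiny self-contained numerical sketch of the two delta formulas, again assuming a logistic activation so that f'(z) = a * (1 - a); the numbers are made up for illustration.

#include <cstdio>

int main()
{
  // Output neuron: cached activation a_out and target label.
  const double a_out = 0.8;
  const double label = 1.0;
  // Example no. 3: (label - activation) * f'(z).
  const double delta_out = (label - a_out) * a_out * (1.0 - a_out);

  // Hidden neuron with one outgoing synapse of weight w to that output neuron.
  const double a_hidden = 0.6;
  const double w = 0.5;
  // Example no. 1: weighted sum of downstream deltas, times f'(z).
  const double delta_hidden = (w * delta_out) * a_hidden * (1.0 - a_hidden);

  std::printf("delta_out = %f, delta_hidden = %f\n", delta_out, delta_hidden);
  return 0;
}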