std::string IndependentParametersError::write_information(void) const
{
   std::ostringstream buffer;

   buffer << "Independent parameters error: " << calculate_performance() << "\n";

   return(buffer.str());
}
Example #2
std::string NeuralParametersNorm::write_information(void) const
{
   std::ostringstream buffer;
   
   buffer << "Neural parameters norm: " << calculate_performance() << "\n";

   return(buffer.str());
}
Example #3
std::string SolutionsError::write_information(void) const
{
   std::ostringstream buffer;
   
   buffer << "Solutions error: " << calculate_performance() << "\n";

   return(buffer.str());
}
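
Examples #1 through #3 above share one pattern: each performance term overrides write_information() to return a single line made of a label followed by the value of calculate_performance(). The standalone sketch below reproduces that pattern with standard-library types only; the DemoTerm class and its hard-coded performance value are illustrative assumptions, not part of the OpenNN sources quoted here.

// Standalone sketch of the write_information() pattern used above.
// "DemoTerm" and its constant performance value are hypothetical; the real
// OpenNN terms compute the value from a neural network and a data set.
#include <iostream>
#include <sstream>
#include <string>

class DemoTerm
{
public:

   double calculate_performance(void) const
   {
      return(0.125);   // placeholder value, for illustration only
   }

   std::string write_information(void) const
   {
      std::ostringstream buffer;

      buffer << "Demo term error: " << calculate_performance() << "\n";

      return(buffer.str());
   }
};

int main(void)
{
   DemoTerm term;

   std::cout << term.write_information();

   return(0);
}

Compiled on its own, the sketch prints "Demo term error: 0.125", the same label-plus-value format that the three OpenNN terms produce.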
Example #4
PerformanceTerm::FirstOrderPerformance
MeanSquaredError::calculate_first_order_performance(void) const {
// Control sentence (if debug)

#ifndef NDEBUG

  check();

#endif

  FirstOrderPerformance first_order_performance;

  first_order_performance.performance = calculate_performance();
  first_order_performance.gradient = calculate_gradient();

  return (first_order_performance);
}
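
FirstOrderPerformance bundles the error value together with its gradient, which is what a first-order training algorithm needs for a single update. The sketch below is a standalone illustration of such an update, using std::vector in place of OpenNN's Vector; the structure name, the numeric values and the learning rate are hypothetical and do not come from the library code above.

// Standalone sketch: consuming a (performance, gradient) pair for one
// gradient-descent update. All names and values here are illustrative only.
#include <cstddef>
#include <iostream>
#include <vector>

struct FirstOrderPerformanceDemo
{
  double performance;
  std::vector<double> gradient;
};

int main(void)
{
  // Values that something like calculate_first_order_performance() might return.
  FirstOrderPerformanceDemo first_order = {0.5, {0.2, -0.1, 0.4}};

  std::vector<double> parameters = {1.0, 1.0, 1.0};

  const double learning_rate = 0.01;   // hypothetical step size

  // Move the parameters against the gradient to reduce the performance value.
  for(std::size_t i = 0; i < parameters.size(); i++)
  {
    parameters[i] -= learning_rate*first_order.gradient[i];
  }

  std::cout << "Updated first parameter: " << parameters[0] << "\n";

  return(0);
}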
Example #5
PerformanceTerm::SecondOrderPerformance
MeanSquaredError::calculate_second_order_performance(void) const {
// Control sentence (if debug)

#ifndef NDEBUG

  check();

#endif

  SecondOrderPerformance second_order_performance;

  second_order_performance.performance = calculate_performance();
  second_order_performance.gradient = calculate_gradient();
  second_order_performance.Hessian = calculate_Hessian();

  return (second_order_performance);
}
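
Compared with the first-order case, SecondOrderPerformance additionally carries the Hessian, which is the ingredient a second-order method needs. For reference only, and not as a description of any particular OpenNN training algorithm, the textbook Newton update built from the gradient g and Hessian H at the current parameter vector \theta_{k} is

\theta_{k+1} = \theta_{k} - H(\theta_{k})^{-1}\, g(\theta_{k})

which is why the Hessian is exposed alongside the performance value and the gradient in this structure.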
Example #6
Vector<double> RootMeanSquaredError::calculate_gradient(void) const
{
// Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__ 

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
   const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

   const size_t layers_number = multilayer_perceptron_pointer->get_layers_number();

   const size_t parameters_number = multilayer_perceptron_pointer->count_parameters_number();

   Vector< Vector< Vector<double> > > first_order_forward_propagation(2); 

   const bool has_conditions_layer = neural_network_pointer->has_conditions_layer();

   const ConditionsLayer* conditions_layer_pointer = has_conditions_layer ? neural_network_pointer->get_conditions_layer_pointer() : NULL;

   Vector<double> particular_solution;
   Vector<double> homogeneous_solution;

   // Data set stuff

   const Instances& instances = data_set_pointer->get_instances();

   const size_t training_instances_number = instances.count_training_instances_number();

   const Vector<size_t> training_indices = instances.arrange_training_indices();

   size_t training_index;

   const Variables& variables = data_set_pointer->get_variables();

   const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
   const Vector<size_t> targets_indices = variables.arrange_targets_indices();

   Vector<double> inputs(inputs_number);
   Vector<double> targets(outputs_number);

   // Performance functional stuff

   const double performance = calculate_performance();

   Vector< Vector<double> > layers_delta; 

   Vector<double> output_gradient(outputs_number);

   Vector<double> point_gradient(parameters_number, 0.0);

   // Main loop

   Vector<double> gradient(parameters_number, 0.0);

   int i = 0;

   #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, output_gradient, \
    layers_delta, particular_solution, homogeneous_solution, point_gradient)

   for(i = 0; i < (int)training_instances_number; i++)
   {
      training_index = training_indices[i];

      inputs = data_set_pointer->get_instance(training_index, inputs_indices);

      targets = data_set_pointer->get_instance(training_index, targets_indices);

      first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

      const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
      const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

      if(!has_conditions_layer)
      {
         output_gradient = (layers_activation[layers_number-1]-targets)/(training_instances_number*performance);

         layers_delta = calculate_layers_delta(layers_activation_derivative, output_gradient);
      }
      else
      {
         particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
         homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

         output_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)/(training_instances_number*performance);

         layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
      }

      point_gradient = calculate_point_gradient(inputs, layers_activation, layers_delta);

      #pragma omp critical

      gradient += point_gradient;
   }

   return(gradient);
}
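
The factor training_instances_number*performance in the output_gradient expressions above is what the chain rule produces for the root mean squared error. Assuming the error is defined as the square root of the mean squared instance error, which is what calculate_performance() is taken to return here, with outputs \mathbf{y}_{i} and targets \mathbf{t}_{i} over N training instances:

E = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\lVert \mathbf{y}_{i}-\mathbf{t}_{i}\rVert^{2}},
\qquad
\frac{\partial E}{\partial \mathbf{y}_{i}} = \frac{\mathbf{y}_{i}-\mathbf{t}_{i}}{N\,E}

This is why each instance error (layers_activation[layers_number-1]-targets) is divided by training_instances_number*performance before being backpropagated through calculate_layers_delta, and why the per-instance point gradients simply sum into the total gradient inside the critical section.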