Vector<double> MeanSquaredError::calculate_terms(void) const {
  // Control sentence (if debug)

#ifndef NDEBUG

  check();

#endif

  // Neural network stuff

  const MultilayerPerceptron* multilayer_perceptron_pointer =
      neural_network_pointer->get_multilayer_perceptron_pointer();

  const unsigned inputs_number =
      multilayer_perceptron_pointer->get_inputs_number();
  const unsigned outputs_number =
      multilayer_perceptron_pointer->get_outputs_number();

  // Data set stuff

  const Instances& instances = data_set_pointer->get_instances();

  const unsigned training_instances_number =
      instances.count_training_instances_number();

  // Mean squared error stuff

  Vector<double> performance_terms(training_instances_number);

  Vector<double> inputs(inputs_number);
  Vector<double> outputs(outputs_number);
  Vector<double> targets(outputs_number);

  for (unsigned i = 0; i < training_instances_number; i++) {
    // Input vector

    inputs = data_set_pointer->get_training_input_instance(i);

    // Output vector

    outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

    // Target vector

    targets = data_set_pointer->get_training_target_instance(i);

    // Error term: Euclidean distance between the output and target vectors

    performance_terms[i] = outputs.calculate_distance(targets);
  }

  // Dividing each term by sqrt(N) makes the sum of squared terms equal the mean squared error

  return (performance_terms / sqrt((double)training_instances_number));
}
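
/*
   Sanity check for the relationship implemented above: each term returned by
   MeanSquaredError::calculate_terms() is the distance between outputs and targets divided by
   sqrt(N), so the squared norm of the terms vector equals the mean squared error itself.
   The sketch below illustrates that relationship with plain std::vector and made-up data;
   it is a standalone illustration and does not use the OpenNN classes above.
*/

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

inline void mean_squared_error_terms_sketch(void)
{
   // Hypothetical outputs and targets for 3 training instances with 2 outputs each

   const std::vector< std::vector<double> > outputs = {{1.0, 2.0}, {0.5, -1.0}, {3.0, 0.0}};
   const std::vector< std::vector<double> > targets = {{0.0, 2.5}, {1.0, -1.0}, {2.0, 0.5}};

   const std::size_t instances_number = outputs.size();

   double sum_squared_terms = 0.0;
   double mean_squared_error = 0.0;

   for(std::size_t i = 0; i < instances_number; i++)
   {
      double squared_distance = 0.0;

      for(std::size_t j = 0; j < outputs[i].size(); j++)
      {
         const double difference = outputs[i][j] - targets[i][j];

         squared_distance += difference*difference;
      }

      // term_i = ||outputs_i - targets_i||/sqrt(N), as built by calculate_terms() above

      const double term = std::sqrt(squared_distance)/std::sqrt((double)instances_number);

      sum_squared_terms += term*term;
      mean_squared_error += squared_distance/(double)instances_number;
   }

   // The squared norm of the terms vector recovers the mean squared error

   assert(std::fabs(sum_squared_terms - mean_squared_error) < 1.0e-12);
}
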
Vector<double> NormalizedSquaredError::calculate_terms(void) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__ 

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
   const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

   // Data set stuff

   const Instances& instances = data_set_pointer->get_instances();

   const size_t training_instances_number = instances.count_training_instances_number();

   const Vector<size_t> training_indices = instances.arrange_training_indices();

   size_t training_index;

   const Variables& variables = data_set_pointer->get_variables();

   const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
   const Vector<size_t> targets_indices = variables.arrange_targets_indices();

   const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

   // Calculate

   Vector<double> performance_terms(training_instances_number);

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   double normalization_coefficient = 0.0;

   int i = 0;  // signed loop index, as required by older OpenMP implementations

   // Parallel loop over the training instances; the normalization coefficient is accumulated with a reduction

   #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : normalization_coefficient)

   for(i = 0; i < (int)training_instances_number; i++)
   {
      training_index = training_indices[i];

       // Input vector

      inputs = data_set_pointer->get_instance(training_index, inputs_indices);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_instance(training_index, targets_indices);

      // Error term: Euclidean distance between the output and target vectors

      performance_terms[i] = outputs.calculate_distance(targets);

      // Normalization coefficient: accumulated squared deviation of the targets from their training mean

      normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
   }

   if(normalization_coefficient < 1.0e-99)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "Vector<double> calculate_terms(void) const method.\n"
             << "Normalization coefficient is zero.\n"
             << "Unuse constant target variables or choose another error functional. ";

      throw std::logic_error(buffer.str());
   }

   // Dividing by the square root of the normalization coefficient makes the sum of squared terms equal the normalized squared error

   return(performance_terms/sqrt(normalization_coefficient));
}
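
/*
   The same kind of check for NormalizedSquaredError::calculate_terms(): the distances are divided
   by the square root of the normalization coefficient (the total squared deviation of the training
   targets from their mean), so the squared norm of the terms vector equals the normalized squared
   error. Again a standalone sketch with plain std::vector and made-up data, not the OpenNN API;
   it mirrors the two-stage structure above (accumulate, then divide once after the loop).
*/

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

inline void normalized_squared_error_terms_sketch(void)
{
   const std::vector< std::vector<double> > outputs = {{1.0, 2.0}, {0.5, -1.0}, {3.0, 0.0}};
   const std::vector< std::vector<double> > targets = {{0.0, 2.5}, {1.0, -1.0}, {2.0, 0.5}};

   const std::size_t instances_number = targets.size();
   const std::size_t targets_number = targets[0].size();

   // Component-wise mean of the training targets (the role of calculate_training_target_data_mean() above)

   std::vector<double> targets_mean(targets_number, 0.0);

   for(std::size_t i = 0; i < instances_number; i++)
      for(std::size_t j = 0; j < targets_number; j++)
         targets_mean[j] += targets[i][j]/(double)instances_number;

   std::vector<double> distances(instances_number, 0.0);

   double sum_squared_error = 0.0;
   double normalization_coefficient = 0.0;

   for(std::size_t i = 0; i < instances_number; i++)
   {
      double squared_distance = 0.0;

      for(std::size_t j = 0; j < targets_number; j++)
      {
         const double error = outputs[i][j] - targets[i][j];
         const double deviation = targets[i][j] - targets_mean[j];

         squared_distance += error*error;
         normalization_coefficient += deviation*deviation;
      }

      distances[i] = std::sqrt(squared_distance);
      sum_squared_error += squared_distance;
   }

   // Dividing every distance by sqrt(normalization coefficient) once, after the loop,
   // makes the sum of squared terms equal the normalized squared error

   double sum_squared_terms = 0.0;

   for(std::size_t i = 0; i < instances_number; i++)
   {
      const double term = distances[i]/std::sqrt(normalization_coefficient);

      sum_squared_terms += term*term;
   }

   assert(std::fabs(sum_squared_terms - sum_squared_error/normalization_coefficient) < 1.0e-12);
}
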