Example No. 1
void RootMeanSquaredErrorTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;

   Vector<double> network_parameters;

   DataSet ds;

   RootMeanSquaredError rmse(&nn, &ds);

   Vector<double> objective_gradient;
   Vector<double> numerical_objective_gradient;

   // Test

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   ds.initialize_data(0.0);

   // Test

   nn.set(3, 4, 2);
   nn.initialize_parameters(1.0);

   network_parameters = nn.arrange_parameters();

   ds.set(3, 2, 5);
   ds.initialize_data(1.0);

   objective_gradient = rmse.calculate_gradient();
   numerical_objective_gradient = nd.calculate_gradient(rmse, &RootMeanSquaredError::calculate_performance, network_parameters);
   assert_true((objective_gradient - numerical_objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(1,1,1);

   network_parameters = nn.arrange_parameters();

   ds.set(1,1,1);
   ds.initialize_data(1.0);

   rmse.set_neural_network_pointer(&nn);

   objective_gradient = rmse.calculate_gradient();
   numerical_objective_gradient = nd.calculate_gradient(rmse, &RootMeanSquaredError::calculate_performance, network_parameters);
   assert_true((objective_gradient - numerical_objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
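Each of the gradient tests in this listing follows the same pattern: compute the analytical gradient, compute a numerical gradient of the corresponding performance functional, and assert that the two agree to within 1.0e-3. As a minimal sketch of what such a numerical check does, here is a central-difference gradient for a generic callable; this is an illustration only, not the library's NumericalDifferentiation API:

#include <cstddef>
#include <functional>
#include <vector>

// Approximates the gradient of f at x with central differences:
// df/dx_i ≈ (f(x + h*e_i) - f(x - h*e_i)) / (2*h).
std::vector<double> central_difference_gradient(
    const std::function<double(const std::vector<double>&)>& f,
    std::vector<double> x,
    const double h = 1.0e-6)
{
   std::vector<double> gradient(x.size());

   for(std::size_t i = 0; i < x.size(); i++)
   {
      const double original = x[i];

      x[i] = original + h;
      const double forward = f(x);

      x[i] = original - h;
      const double backward = f(x);

      x[i] = original;

      gradient[i] = (forward - backward)/(2.0*h);
   }

   return gradient;
}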
void NeuralParametersNormTest::test_calculate_performance(void)   
{
   message += "test_calculate_performance\n";

   NeuralNetwork nn;
   Vector<double> neural_parameters;

   NeuralParametersNorm npn(&nn);

   Vector<double> parameters;

   double performance;

   // Test

   nn.set(1, 1);
   nn.initialize_parameters(0.0);

   performance = npn.calculate_regularization();

   assert_true(performance == 0.0, LOG);

   // Test

   nn.set(1, 1);
   nn.initialize_parameters(3.1415927);

   parameters = nn.arrange_parameters();

   assert_true(npn.calculate_regularization() == npn.calculate_regularization(parameters), LOG);
}
Example No. 3
void NeuralNetworkTest::test_arrange_parameters(void) {
  message += "test_arrange_parameters\n";

  NeuralNetwork nn;
  Vector<double> parameters;

  IndependentParameters* ip;

  // Test

  nn.set();
  parameters = nn.arrange_parameters();

  assert_true(parameters.size() == 0, LOG);

  // Test

  nn.set(1, 1, 1);

  ip = new IndependentParameters(1);
  nn.set_independent_parameters_pointer(ip);
  nn.initialize_parameters(0.0);
  parameters = nn.arrange_parameters();

  assert_true(parameters.size() == 5, LOG);
  assert_true(parameters == 0.0, LOG);

  // Test

  nn.set();
  ip = new IndependentParameters(1);
  nn.set_independent_parameters_pointer(ip);
  nn.initialize_parameters(0.0);
  parameters = nn.arrange_parameters();
  assert_true(parameters.size() == 1, LOG);
  assert_true(parameters == 0.0, LOG);

  // Test

  nn.set(1, 1, 1);
  ip = new IndependentParameters(1);
  nn.set_independent_parameters_pointer(ip);
  nn.initialize_parameters(0.0);
  parameters = nn.arrange_parameters();
  assert_true(parameters.size() == 5, LOG);
  assert_true(parameters == 0.0, LOG);
}
void NormalizedSquaredErrorTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;

   Vector<double> network_parameters;

   DataSet ds;
   Matrix<double> data;

   NormalizedSquaredError nse(&nn, &ds);

   Vector<double> objective_gradient;
   Vector<double> numerical_objective_gradient;

   // Test 

   nn.set(1,1,1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 2);

   data.set(2, 2);
   data[0][0] = -1.0;
   data[0][1] = -1.0;
   data[1][0] = 1.0;
   data[1][1] = 1.0;

   ds.set_data(data);

   objective_gradient = nse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 5);
   nn.randomize_parameters_normal();

   network_parameters = nn.arrange_parameters();

   ds.set(3, 5, 2);
   ds.randomize_data_normal();

   objective_gradient = nse.calculate_gradient();
   numerical_objective_gradient = nd.calculate_gradient(nse, &NormalizedSquaredError::calculate_performance, network_parameters);

   assert_true((objective_gradient - numerical_objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
Example No. 5
void NeuralNetworkTest::test_randomize_parameters_normal(void) {
  message += "test_randomize_parameters_normal\n";

  NeuralNetwork nn;
  Vector<double> network_parameters;

  // Test

  nn.set(1, 1, 1);
  nn.randomize_parameters_normal(1.0, 0.0);
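  // With mean 1.0 and standard deviation 0.0, every parameter is set to exactly 1.0.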
  network_parameters = nn.arrange_parameters();
  assert_true(network_parameters == 1.0, LOG);
}
Example No. 6
void NeuralNetworkTest::test_randomize_parameters_uniform(void) {
  message += "test_randomize_parameters_uniform\n";

  NeuralNetwork nn;
  Vector<double> parameters;

  // Test

  nn.set(1, 1, 1);
  nn.randomize_parameters_uniform();
  parameters = nn.arrange_parameters();
  assert_true(parameters >= -1.0, LOG);
  assert_true(parameters <= 1.0, LOG);
}
Example No. 7
void NeuralNetworkTest::test_set_parameters(void) {
  message += "test_set_parameters\n";

  Vector<unsigned> multilayer_perceptron_architecture;
  NeuralNetwork nn;

  unsigned parameters_number;
  Vector<double> parameters;

  // Test

  nn.set_parameters(parameters);

  parameters = nn.arrange_parameters();
  assert_true(parameters.size() == 0, LOG);

  // Test

  multilayer_perceptron_architecture.set(2, 2);
  nn.set(multilayer_perceptron_architecture);

  nn.construct_independent_parameters();

  nn.get_independent_parameters_pointer()->set_parameters_number(2);

  parameters_number = nn.count_parameters_number();

  parameters.set(0.0, 1.0, parameters_number - 1);
  nn.set_parameters(parameters);
  parameters = nn.arrange_parameters();

  assert_true(parameters.size() == parameters_number, LOG);
  assert_true(parameters[0] == 0.0, LOG);
  assert_true(parameters[parameters_number - 1] == parameters_number - 1.0,
              LOG);
}
void PerformanceFunctionalTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NeuralNetwork nn;

   size_t parameters_number;
   Vector<double> parameters;

   PerformanceFunctional pf(&nn);

   pf.destruct_all_terms();
   pf.set_regularization_type(PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

   Vector<double> gradient;

   // Test

   nn.set(1, 1, 1);

   nn.initialize_parameters(0.0);

   parameters = nn.arrange_parameters();

   gradient = pf.calculate_gradient(parameters);

   assert_true(gradient == 0.0, LOG);

   // Test

   parameters_number = nn.count_parameters_number();
   nn.initialize_parameters(0.0);

   MockPerformanceTerm* mptp = new MockPerformanceTerm(&nn);

   pf.set_user_objective_pointer(mptp);

   gradient = pf.calculate_gradient();

   assert_true(gradient.size() == parameters_number, LOG);
   assert_true(gradient == 0.0, LOG);
}
Example No. 9
void NeuralNetworkTest::test_initialize_parameters(void) {
  message += "test_initialize_parameters\n";

  NeuralNetwork nn;
  Vector<double> parameters;

  IndependentParameters* ip;

  // Test

  nn.set(1, 1, 1);

  nn.construct_independent_parameters();

  ip = nn.get_independent_parameters_pointer();
  ip->set_parameters_number(1);

  nn.randomize_parameters_normal(1.0, 0.0);
  parameters = nn.arrange_parameters();
  assert_true(parameters == 1.0, LOG);
}
void MeanSquaredErrorTest::test_calculate_Jacobian_terms(void)
{
   message += "test_calculate_Jacobian_terms\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;
   Vector<unsigned> multilayer_perceptron_architecture;
   Vector<double> parameters;

   DataSet ds;

   MeanSquaredError mse(&nn, &ds);

   Vector<double> objective_gradient;

   Vector<double> evaluation_terms;
   Matrix<double> terms_Jacobian;
   Matrix<double> numerical_Jacobian_terms;

   // Test

   nn.set(1, 1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);

   ds.initialize_data(0.0);

   terms_Jacobian = mse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   mse.set(&nn, &ds);
   ds.initialize_data(0.0);

   terms_Jacobian = mse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test

   multilayer_perceptron_architecture.set(3);
   multilayer_perceptron_architecture[0] = 2;
   multilayer_perceptron_architecture[1] = 1;
   multilayer_perceptron_architecture[2] = 2;

   nn.set(multilayer_perceptron_architecture);
   nn.initialize_parameters(0.0);

   ds.set(2, 2, 5);
   mse.set(&nn, &ds);
   ds.initialize_data(0.0);

   terms_Jacobian = mse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test

   nn.set(1, 1, 1);
   nn.randomize_parameters_normal();
   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   terms_Jacobian = mse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2, 2, 2);
   nn.randomize_parameters_normal();
   parameters = nn.arrange_parameters();

   ds.set(2, 2, 2);
   ds.randomize_data_normal();

   terms_Jacobian = mse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2, 2, 2);
   nn.randomize_parameters_normal();

   ds.set(2, 2, 2);
   ds.randomize_data_normal();
   
   objective_gradient = mse.calculate_gradient();

   evaluation_terms = mse.calculate_terms();
   terms_Jacobian = mse.calculate_terms_Jacobian();

   assert_true(((terms_Jacobian.calculate_transpose()).dot(evaluation_terms)*2.0 - objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
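The final assertion above relies on the standard identity for errors assembled from a vector of terms: when the terms are scaled so that the error equals the sum of their squares,

$$E(\theta) = \sum_i e_i(\theta)^2, \qquad \nabla E(\theta) = 2\,J(\theta)^{T} e(\theta), \qquad J_{ij} = \frac{\partial e_i}{\partial \theta_j},$$

which is exactly the terms_Jacobian-transpose-times-terms product, scaled by 2.0, that the test compares against calculate_gradient().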
void MeanSquaredErrorTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;
   Vector<unsigned> multilayer_perceptron_architecture;

   Vector<double> parameters;

   DataSet ds;

   MeanSquaredError mse(&nn, &ds);

   Vector<double> objective_gradient;
   Vector<double> numerical_objective_gradient;
   Vector<double> numerical_differentiation_error;

   // Test

   nn.set(1, 1, 1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);

   ds.initialize_data(0.0);

   objective_gradient = mse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   mse.set(&nn, &ds);
   ds.initialize_data(0.0);

   objective_gradient = mse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test

   multilayer_perceptron_architecture.set(3);
   multilayer_perceptron_architecture[0] = 2;
   multilayer_perceptron_architecture[1] = 1;
   multilayer_perceptron_architecture[2] = 3;

   nn.set(multilayer_perceptron_architecture);
   nn.initialize_parameters(0.0);

   ds.set(2, 3, 5);
   mse.set(&nn, &ds);
   ds.initialize_data(0.0);

   objective_gradient = mse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test

   nn.set(1, 1, 1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);

   ds.initialize_data(0.0);

   objective_gradient = mse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   mse.set(&nn, &ds);
   ds.initialize_data(0.0);

   objective_gradient = mse.calculate_gradient();

   assert_true(objective_gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(objective_gradient == 0.0, LOG);

   // Test

   nn.set(1, 1);
   nn.initialize_parameters(1.0);
   parameters = nn.arrange_parameters();

   ds.set(1, 1, 2);
   ds.initialize_data(1.0);

   objective_gradient = mse.calculate_gradient();
   numerical_objective_gradient = nd.calculate_gradient(mse, &MeanSquaredError::calculate_performance, parameters);   
   assert_true((objective_gradient - numerical_objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
Example No. 12
void NeuralNetworkTest::test_calculate_outputs(void) {
  message += "test_calculate_outputs\n";

  NeuralNetwork nn;

  unsigned inputs_number;
  unsigned outputs_number;

  Vector<unsigned> architecture;

  Vector<double> inputs;
  Vector<double> outputs;

  unsigned parameters_number;

  Vector<double> parameters;

  // Test

  nn.set(3, 4, 2);
  nn.initialize_parameters(0.0);

  inputs.set(3, 0.0);

  outputs = nn.calculate_outputs(inputs);

  assert_true(outputs == 0.0, LOG);

  // Test

  nn.set(1, 1, 1);
  nn.initialize_parameters(0.0);

  inputs.set(1, 0.0);

  outputs = nn.calculate_outputs(inputs);

  assert_true(outputs == 0.0, LOG);

  // Test

  nn.set(1, 1);

  inputs.set(1);
  inputs.randomize_normal();

  parameters = nn.arrange_parameters();

  assert_true(
      nn.calculate_outputs(inputs) == nn.calculate_outputs(inputs, parameters),
      LOG);

  // Test

  nn.set(4, 3, 5);

  inputs.set(4, 0.0);

  parameters_number = nn.count_parameters_number();

  parameters.set(parameters_number, 0.0);

  outputs = nn.calculate_outputs(inputs, parameters);

  assert_true(outputs.size() == 5, LOG);
  assert_true(outputs == 0.0, LOG);

  // Test

  architecture.set(5);

  architecture.randomize_uniform(5, 10);

  nn.set(architecture);

  inputs_number = nn.get_inputs_pointer()->get_inputs_number();
  outputs_number = nn.get_outputs_pointer()->get_outputs_number();

  inputs.set(inputs_number, 0.0);

  parameters_number = nn.count_parameters_number();

  parameters.set(parameters_number, 0.0);

  outputs = nn.calculate_outputs(inputs, parameters);

  assert_true(outputs.size() == outputs_number, LOG);
  assert_true(outputs == 0.0, LOG);
}
void PerformanceFunctionalTest::test_calculate_performance(void)
{
   message += "test_calculate_performance\n";

   DataSet ds;

   NeuralNetwork nn;

   Vector<double> parameters;

   PerformanceFunctional pf(&nn);

   double performance;

   Vector<double> direction;
   double rate;

   // Test

   pf.destruct_all_terms();
   pf.set_regularization_type(PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

   NeuralParametersNorm* neural_parameters_norm = pf.get_neural_parameters_norm_regularization_pointer();

   double neural_parameters_norm_weight = neural_parameters_norm->get_neural_parameters_norm_weight();

   nn.set(1, 1);

   nn.initialize_parameters(1.0);

   parameters = nn.arrange_parameters();

   assert_true(fabs(pf.calculate_performance() - neural_parameters_norm_weight*sqrt(2.0)) < 1.0e-3, LOG);

   assert_true(fabs(pf.calculate_performance() - pf.calculate_performance(parameters)) < 1.0e-3, LOG);

   // Test

   parameters = nn.arrange_parameters();

   assert_true(pf.calculate_performance() != pf.calculate_performance(parameters*2.0), LOG);

   // Test

   direction.set(2, -0.5);
   rate = 2.0;

   assert_true(pf.calculate_performance(direction, rate) == 0.0, LOG);

   // Test

   parameters = nn.arrange_parameters();

   direction.set(2, -1.5);
   rate = 2.3;

   assert_true(pf.calculate_performance(direction, rate) == pf.calculate_performance(parameters + direction*rate), LOG);

   // Test

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   pf.set_data_set_pointer(&ds);

   pf.destruct_all_terms();
   pf.set_objective_type(PerformanceFunctional::SUM_SQUARED_ERROR_OBJECTIVE);

   nn.set(1, 1);

   nn.initialize_parameters(1.0);

   parameters = nn.arrange_parameters();

   assert_true(fabs(pf.calculate_performance() - pf.calculate_performance(parameters)) < 1.0e-3, LOG);

   // Test

   parameters = nn.arrange_parameters();

   assert_true(pf.calculate_performance() != pf.calculate_performance(parameters*2.0), LOG);

   // Test

   parameters = nn.arrange_parameters();

   direction.set(2, -1.5);
   rate = 2.3;

   assert_true(pf.calculate_performance(direction, rate) == pf.calculate_performance(parameters + direction*rate), LOG);

   // Test

   nn.initialize_parameters(0.0);

   MockPerformanceTerm* mptp = new MockPerformanceTerm(&nn);

   pf.set_user_objective_pointer(mptp);

   performance = pf.calculate_performance();

   assert_true(performance == 0.0, LOG);
}
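The first assertion in this test can be checked by hand: nn.set(1, 1) creates two parameters (one weight and one bias), both initialized to 1.0, so the parameter norm is sqrt(1.0 + 1.0) = sqrt(2.0), and with only the norm regularization term active the performance is neural_parameters_norm_weight * sqrt(2.0).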
void SumSquaredErrorTest::test_calculate_terms_Jacobian(void)
{   
   message += "test_calculate_terms_Jacobian\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;
   Vector<size_t> architecture;
   Vector<double> parameters;

   DataSet ds;

   SumSquaredError sse(&nn, &ds);

   Vector<double> gradient;

   Vector<double> terms;
   Matrix<double> terms_Jacobian;
   Matrix<double> numerical_Jacobian_terms;

   // Test

   nn.set(1, 1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);

   ds.initialize_data(0.0);

   terms_Jacobian = sse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().get_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   terms_Jacobian = sse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test

   architecture.set(3);
   architecture[0] = 5;
   architecture[1] = 1;
   architecture[2] = 2;

   nn.set(architecture);
   nn.initialize_parameters(0.0);

   ds.set(5, 2, 3);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   terms_Jacobian = sse.calculate_terms_Jacobian();

   assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG);
   assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG);
   assert_true(terms_Jacobian == 0.0, LOG);

   // Test

   nn.set(1, 1, 1);
   nn.randomize_parameters_normal();
   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   terms_Jacobian = sse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2, 2, 2);
   nn.randomize_parameters_normal();
   parameters = nn.arrange_parameters();

   ds.set(2, 2, 2);
   ds.randomize_data_normal();

   terms_Jacobian = sse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2, 2, 2);
   nn.randomize_parameters_normal();

   ds.set(2, 2, 2);
   ds.randomize_data_normal();
   
   gradient = sse.calculate_gradient();

   terms = sse.calculate_terms();
   terms_Jacobian = sse.calculate_terms_Jacobian();

   assert_true(((terms_Jacobian.calculate_transpose()).dot(terms)*2.0 - gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
Example No. 15
void SumSquaredErrorTest::test_calculate_performance(void) {
  message += "test_calculate_performance\n";

  NeuralNetwork nn;
  Vector<double> parameters;

  DataSet ds;
  Matrix<double> data;

  SumSquaredError sse(&nn, &ds);

  double performance;

  // Test

  nn.set(1, 1);
  nn.initialize_parameters(0.0);

  ds.set(1, 1, 1);
  ds.initialize_data(0.0);

  performance = sse.calculate_performance();

  assert_true(performance == 0.0, LOG);

  // Test

  nn.set(1, 1, 1);
  nn.initialize_parameters(0.0);

  ds.set(1, 1, 1);
  ds.initialize_data(1.0);

  performance = sse.calculate_performance();

  assert_true(performance == 1.0, LOG);

  // Test

  nn.set(1, 1);
  nn.randomize_parameters_normal();

  parameters = nn.arrange_parameters();

  ds.set(1, 1, 1);
  ds.randomize_data_normal();

  assert_true(
      sse.calculate_performance() == sse.calculate_performance(parameters),
      LOG);

  // Test

  nn.set(1, 1);
  nn.randomize_parameters_normal();

  parameters = nn.arrange_parameters();

  ds.set(1, 1, 1);
  ds.randomize_data_normal();

  assert_true(sse.calculate_performance() !=
                  sse.calculate_performance(parameters * 2.0),
              LOG);
}
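The expected value of 1.0 in the second test can also be worked out directly: with every weight and bias initialized to 0.0 the network output is 0.0 for any input, the single instance has target 1.0, and the sum of squared errors is (1.0 - 0.0)^2 = 1.0.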
void TrainingRateAlgorithmTest::test_calculate_fixed_directional_point(void) {
  message += "test_calculate_fixed_directional_point\n";

  NeuralNetwork nn;

  Vector<double> parameters;

  PerformanceFunctional pf(&nn);

  pf.destruct_all_terms();
  pf.set_regularization_type(
      PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

  double performance;
  Vector<double> gradient;

  TrainingRateAlgorithm tra(&pf);

  Vector<double> training_direction;
  double training_rate;

  Vector<double> directional_point;

  // Test

  nn.set(1, 1);

  nn.initialize_parameters(1.0);

  performance = pf.calculate_performance();

  gradient = pf.calculate_gradient();

  training_direction = gradient * (-1.0);
  training_rate = 0.001;

  directional_point = tra.calculate_fixed_directional_point(
      performance, training_direction, training_rate);

  assert_true(directional_point.size() == 2, LOG);
  assert_true(directional_point[1] < performance, LOG);

  assert_true(directional_point[1] ==
                  pf.calculate_performance(training_direction, training_rate),
              LOG);

  parameters = nn.arrange_parameters();

  nn.set_parameters(parameters + training_direction * training_rate);

  assert_true(directional_point[1] == pf.calculate_performance(), LOG);

  // Test

  nn.set(1, 1);

  nn.initialize_parameters(1.0);

  training_direction.set(2, -1.0);
  training_rate = 1.0;

  directional_point = tra.calculate_fixed_directional_point(
      3.14, training_direction, training_rate);

  assert_true(directional_point.size() == 2, LOG);
  assert_true(directional_point[0] == 1.0, LOG);
  assert_true(directional_point[1] == 0.0, LOG);
}
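The assertions pin down what a fixed directional point is here: a pair holding the training rate and the performance evaluated at the stepped parameters, directional_point = {rate, f(parameters + rate*direction)}. That is why the second test, starting from two parameters equal to 1.0 with direction (-1.0, -1.0) and rate 1.0, lands on parameters (0.0, 0.0), whose parameter-norm regularization is 0.0, giving directional_point == {1.0, 0.0}; the performance value passed in as the first argument (3.14) does not appear in the result of this fixed-rate variant.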
void LevenbergMarquardtAlgorithmTest::test_calculate_Hessian_approximation(void)
{
   message += "test_calculate_Hessian_approximation\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;

   size_t parameters_number;

   Vector<double> parameters;

   DataSet ds;

   PerformanceFunctional pf(&nn, &ds);

   pf.set_error_type(PerformanceFunctional::SUM_SQUARED_ERROR);

   Matrix<double> terms_Jacobian;
   Matrix<double> Hessian;
   Matrix<double> numerical_Hessian;
   Matrix<double> Hessian_approximation;

   LevenbergMarquardtAlgorithm lma(&pf);
   
   // Test

   nn.set(1, 2);
   nn.initialize_parameters(0.0);

   parameters_number = nn.count_parameters_number();

   ds.set(1,2,2);
   ds.initialize_data(0.0);

   terms_Jacobian = pf.calculate_terms_Jacobian();

   Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);

   assert_true(Hessian_approximation.get_rows_number() == parameters_number, LOG);
   assert_true(Hessian_approximation.get_columns_number() == parameters_number, LOG);
   assert_true(Hessian_approximation.is_symmetric(), LOG);

   // Test

   pf.set_error_type(PerformanceFunctional::NORMALIZED_SQUARED_ERROR);

   nn.set(1,1,2);
   nn.randomize_parameters_normal();

   parameters_number = nn.count_parameters_number();

   ds.set(1,2,3);
   ds.randomize_data_normal();

   terms_Jacobian = pf.calculate_terms_Jacobian();

   Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);

   assert_true(Hessian_approximation.get_rows_number() == parameters_number, LOG);
   assert_true(Hessian_approximation.get_columns_number() == parameters_number, LOG);
   assert_true(Hessian_approximation.is_symmetric(), LOG);

   // Test

   nn.set(2);

   nn.randomize_parameters_normal();

   MockErrorTerm* mptp = new MockErrorTerm(&nn);

   pf.set_user_error_pointer(mptp);

   terms_Jacobian = pf.calculate_terms_Jacobian();

   Hessian = pf.calculate_Hessian();

   lma.set_damping_parameter(0.0);

   assert_true((lma.calculate_Hessian_approximation(terms_Jacobian) - Hessian).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   pf.set_error_type(PerformanceFunctional::SUM_SQUARED_ERROR);

   ds.set(1, 1, 1);

   ds.randomize_data_normal();

   nn.set(1, 1);

   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   numerical_Hessian = nd.calculate_Hessian(pf, &PerformanceFunctional::calculate_performance, parameters);

   terms_Jacobian = pf.calculate_terms_Jacobian();

   Hessian_approximation = lma.calculate_Hessian_approximation(terms_Jacobian);

   assert_true((numerical_Hessian - Hessian_approximation).calculate_absolute_value() >= 0.0, LOG);

}
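For sum-of-squares error terms the Levenberg-Marquardt algorithm approximates the Hessian from the terms Jacobian plus a damping term on the diagonal; up to the constant factor implied by how the library scales its terms,

$$H \approx 2\,J^{T} J + \lambda I,$$

where $\lambda$ is the damping parameter. The third test sets the damping parameter to 0.0 precisely so that this approximation can be compared directly against the exact Hessian of the mock error term.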
void NeuralParametersNormTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;
   NeuralNetwork nn;
   NeuralParametersNorm npn(&nn);

   Vector<size_t> architecture;

   Vector<double> parameters;
   Vector<double> gradient;
   Vector<double> numerical_gradient;
   Vector<double> error;

   // Test 

   nn.set(1, 1, 1);
   nn.initialize_parameters(0.0);

   gradient = npn.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   gradient = npn.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   architecture.set(3);
   architecture[0] = 5;
   architecture[1] = 1;
   architecture[2] = 2;

   nn.set(architecture);
   nn.initialize_parameters(0.0);

   npn.set_neural_network_pointer(&nn);

   gradient = npn.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   npn.set_neural_network_pointer(&nn);

   gradient = npn.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);


   // Test

   nn.initialize_parameters(1.0);
   parameters = nn.arrange_parameters();

   gradient = npn.calculate_gradient();
   numerical_gradient = nd.calculate_gradient(npn, &NeuralParametersNorm::calculate_regularization, parameters);
   error = (gradient - numerical_gradient).calculate_absolute_value();

   assert_true(error < 1.0e-3, LOG);
}
void MinkowskiErrorTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;
   Vector<size_t> architecture;

   Vector<double> parameters;

   DataSet ds;

   MinkowskiError me(&nn, &ds);

   Vector<double> gradient;
   Vector<double> numerical_gradient;

   // Test

   nn.set(1,1,1);

   nn.initialize_parameters(0.0);

   ds.set(1,1,1);

   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3,4,2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   me.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   architecture.set(3);
   architecture[0] = 2;
   architecture[1] = 1;
   architecture[2] = 3;

   nn.set(architecture);
   nn.initialize_parameters(0.0);

   ds.set(2, 3, 5);
   me.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   nn.set(1,1,1);

   nn.initialize_parameters(0.0);

   ds.set(1,1,1);

   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3,4,2);
   nn.initialize_parameters(0.0);

   ds.set(3,2,5);
   me.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   architecture.set(3);
   architecture[0] = 2;
   architecture[1] = 1;
   architecture[2] = 2;

   nn.set(architecture);
   nn.initialize_parameters(0.0);

   ds.set(2,2,3);
   me.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient = me.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   architecture.set(4, 1);

   nn.set(architecture);
   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   ds.set(1,1,1);
   ds.randomize_data_normal();

   gradient = me.calculate_gradient();
   numerical_gradient = nd.calculate_gradient(me, &MinkowskiError::calculate_error, parameters);

   assert_true((gradient - numerical_gradient).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(5,4,3);
   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   ds.set(2,5,3);
   ds.randomize_data_normal();

   me.set_Minkowski_parameter(1.75);

   gradient = me.calculate_gradient();
   numerical_gradient = nd.calculate_gradient(me, &MinkowskiError::calculate_error, parameters);
   assert_true((gradient - numerical_gradient).calculate_absolute_value() < 1.0e-3, LOG);
}
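The Minkowski error generalizes the sum squared error by raising the absolute residuals to a configurable exponent p (set to 1.75 with set_Minkowski_parameter in the last test); up to the normalization the library applies,

$$E(\theta) = \sum_i \lvert y_i(\theta) - t_i \rvert^{p}.$$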
void SumSquaredErrorTest::test_calculate_gradient(void)
{
   message += "test_calculate_gradient\n";

   NumericalDifferentiation nd;
   DataSet ds;
   NeuralNetwork nn;
   SumSquaredError sse(&nn, &ds);

   Vector<size_t> architecture;

   Vector<double> parameters;
   Vector<double> gradient;
   Vector<double> numerical_gradient;
   Vector<double> error;

   // Test 

   nn.set(1, 1, 1);
   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);
   ds.initialize_data(0.0);

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 2, 5);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient.clear();

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   architecture.set(3);
   architecture[0] = 5;
   architecture[1] = 1;
   architecture[2] = 2;

   nn.set(architecture);
   nn.initialize_parameters(0.0);

   ds.set(5, 5, 2);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient.clear();

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   nn.set(1, 1, 1);

   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);

   ds.initialize_data(0.0);

   gradient.clear();

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test 

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   ds.set(3, 3, 2);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient.clear();

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   nn.set(2, 3, 4);
   nn.initialize_parameters(0.0);

   ds.set(2, 4, 5);
   sse.set(&nn, &ds);
   ds.initialize_data(0.0);

   gradient.clear();

   gradient = sse.calculate_gradient();

   assert_true(gradient.size() == nn.count_parameters_number(), LOG);
   assert_true(gradient == 0.0, LOG);

   // Test

   for(unsigned i = 0; i < 100; i++)
   {
      ds.initialize_data(1.0);

      nn.randomize_parameters_normal();
      parameters = nn.arrange_parameters();

      gradient.clear();

      gradient = sse.calculate_gradient();
      numerical_gradient = nd.calculate_gradient(sse, &SumSquaredError::calculate_error, parameters);
      error = (gradient - numerical_gradient).calculate_absolute_value();

      assert_true(error < 1.0e-3, LOG);
   }

   // Test

   nn.set(1, 1, 1);
   nn.initialize_parameters(1.0);
   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);

   ds.initialize_data(1.0);

   gradient.clear();

   gradient = sse.calculate_gradient();
   numerical_gradient = nd.calculate_gradient(sse, &SumSquaredError::calculate_error, parameters);
   assert_true((gradient - numerical_gradient).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   architecture.set(1000, 1);

   nn.set(architecture);
   nn.randomize_parameters_normal();

   ds.set(10, 1, 1);
   ds.randomize_data_normal();

   sse.set(&nn, &ds);

   gradient.clear();

   gradient = sse.calculate_gradient();
}
void NormalizedSquaredErrorTest::test_calculate_Jacobian_terms(void)
{
   message += "test_calculate_Jacobian_terms\n";

   NumericalDifferentiation nd;

   NeuralNetwork nn;
   Vector<int> hidden_layers_size;
   Vector<double> network_parameters;

   DataSet ds;

   NormalizedSquaredError nse(&nn, &ds);

   Vector<double> objective_gradient;

   Vector<double> evaluation_terms;
   Matrix<double> terms_Jacobian;
   Matrix<double> numerical_Jacobian_terms;

   // Test

   nn.set(1, 1);
   nn.randomize_parameters_normal();
   network_parameters = nn.arrange_parameters();

   ds.set(1, 1, 2);
   ds.randomize_data_normal();

   terms_Jacobian = nse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(nse, &NormalizedSquaredError::calculate_terms, network_parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2, 2, 2);
   nn.randomize_parameters_normal();
   network_parameters = nn.arrange_parameters();

   ds.set(2, 2, 2);
   ds.randomize_data_normal();

   terms_Jacobian = nse.calculate_terms_Jacobian();
   numerical_Jacobian_terms = nd.calculate_Jacobian(nse, &NormalizedSquaredError::calculate_terms, network_parameters);

   assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG);

   // Test

   nn.set(2,2,2);
   nn.randomize_parameters_normal();

   ds.set(2,2,2);
   ds.randomize_data_normal();
   
   objective_gradient = nse.calculate_gradient();

   evaluation_terms = nse.calculate_terms();
   terms_Jacobian = nse.calculate_terms_Jacobian();

   assert_true(((terms_Jacobian.calculate_transpose()).dot(evaluation_terms)*2.0 - objective_gradient).calculate_absolute_value() < 1.0e-3, LOG);

}
void SumSquaredErrorTest::test_calculate_loss(void)   
{
   message += "test_calculate_loss\n";

   NeuralNetwork nn;
   Vector<double> parameters;

   DataSet ds;
   Matrix<double> data;
   MissingValues* missing_values_pointer = ds.get_missing_values_pointer();

   SumSquaredError sse(&nn, &ds);

   double loss;

   // Test

   nn.set(1, 1);
   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);
   ds.initialize_data(0.0);

   loss = sse.calculate_error();

   assert_true(loss == 0.0, LOG);

   // Test

   nn.set(1, 1, 1);
   nn.initialize_parameters(0.0);

   ds.set(1, 1, 1);
   ds.initialize_data(1.0);

   loss = sse.calculate_error();

   assert_true(loss == 1.0, LOG);

   // Test

   nn.set(1, 1);
   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   assert_true(sse.calculate_error() == sse.calculate_error(parameters), LOG);

   // Test

   nn.set(1, 1);
   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   assert_true(sse.calculate_error() != sse.calculate_error(parameters*2.0), LOG);

   // Test

   nn.set(1, 1);
   nn.randomize_parameters_normal();

   parameters = nn.arrange_parameters();

   ds.set(1, 1, 1);
   ds.randomize_data_normal();

   missing_values_pointer->append(0, 0);

//   assert_true(sse.calculate_loss() == 0.0, LOG);
}
Example No. 23
void PerformanceFunctionalTest::test_calculate_Hessian(void) {
  message += "test_calculate_Hessian\n";

  NeuralNetwork nn;
  unsigned parameters_number;
  Vector<double> parameters;

  PerformanceFunctional pf(&nn);

  pf.destruct_all_terms();
  pf.set_regularization_type(
      PerformanceFunctional::NEURAL_PARAMETERS_NORM_REGULARIZATION);

  Matrix<double> Hessian;

  // Test

  nn.set(1, 1, 1);

  nn.initialize_parameters(0.0);

  parameters_number = nn.count_parameters_number();
  parameters = nn.arrange_parameters();

  Hessian = pf.calculate_Hessian(parameters);

  assert_true(Hessian.get_rows_number() == parameters_number, LOG);
  assert_true(Hessian.get_columns_number() == parameters_number, LOG);

  // Test

  nn.set();

  nn.initialize_parameters(0.0);

  parameters_number = nn.count_parameters_number();
  parameters = nn.arrange_parameters();

  Hessian = pf.calculate_Hessian(parameters);

  assert_true(Hessian.get_rows_number() == parameters_number, LOG);
  assert_true(Hessian.get_columns_number() == parameters_number, LOG);

  // Test

  nn.set(1, 1);

  nn.initialize_parameters(0.0);

  parameters_number = nn.count_parameters_number();
  parameters = nn.arrange_parameters();

  Hessian = pf.calculate_Hessian(parameters);

  assert_true(Hessian.get_rows_number() == parameters_number, LOG);
  assert_true(Hessian.get_columns_number() == parameters_number, LOG);

  // Test

  parameters_number = nn.count_parameters_number();
  nn.initialize_parameters(0.0);

  MockPerformanceTerm* mptp = new MockPerformanceTerm(&nn);

  pf.set_user_objective_pointer(mptp);

  Hessian = pf.calculate_Hessian();

  assert_true(Hessian.get_rows_number() == parameters_number, LOG);
  assert_true(Hessian.get_columns_number() == parameters_number, LOG);
}