void ProbabilisticLayerTest::test_calculate_Jacobian(void) { message += "test_calculate_Jacobian\n"; NumericalDifferentiation nd; ProbabilisticLayer pl; Vector<double> inputs; Matrix<double> Jacobian; Matrix<double> numerical_Jacobian; // Test if(numerical_differentiation_tests) { pl.set_probabilistic_method(ProbabilisticLayer::Softmax); pl.set(3); inputs.set(3); inputs.randomize_normal(); Jacobian = pl.calculate_Jacobian(inputs); numerical_Jacobian = nd.calculate_Jacobian(pl, &ProbabilisticLayer::calculate_outputs, inputs); assert_true((Jacobian-numerical_Jacobian).calculate_absolute_value() < 1.0e-3, LOG); } }
void SumSquaredErrorTest::test_calculate_terms_Jacobian(void) { message += "test_calculate_terms_Jacobian\n"; NumericalDifferentiation nd; NeuralNetwork nn; Vector<size_t> architecture; Vector<double> parameters; DataSet ds; SumSquaredError sse(&nn, &ds); Vector<double> gradient; Vector<double> terms; Matrix<double> terms_Jacobian; Matrix<double> numerical_Jacobian_terms; // Test nn.set(1, 1); nn.initialize_parameters(0.0); ds.set(1, 1, 1); ds.initialize_data(0.0); terms_Jacobian = sse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().get_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test nn.set(3, 4, 2); nn.initialize_parameters(0.0); ds.set(3, 2, 5); sse.set(&nn, &ds); ds.initialize_data(0.0); terms_Jacobian = sse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test architecture.set(3); architecture[0] = 5; architecture[1] = 1; architecture[2] = 2; nn.set(architecture); nn.initialize_parameters(0.0); ds.set(5, 2, 3); sse.set(&nn, &ds); ds.initialize_data(0.0); terms_Jacobian = sse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test nn.set(1, 1, 1); nn.randomize_parameters_normal(); parameters = nn.arrange_parameters(); ds.set(1, 1, 1); ds.randomize_data_normal(); terms_Jacobian = sse.calculate_terms_Jacobian(); numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters); 
assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2, 2, 2); nn.randomize_parameters_normal(); parameters = nn.arrange_parameters(); ds.set(2, 2, 2); ds.randomize_data_normal(); terms_Jacobian = sse.calculate_terms_Jacobian(); numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_terms, parameters); assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2, 2, 2); nn.randomize_parameters_normal(); ds.set(2, 2, 2); ds.randomize_data_normal(); gradient = sse.calculate_gradient(); terms = sse.calculate_terms(); terms_Jacobian = sse.calculate_terms_Jacobian(); assert_true(((terms_Jacobian.calculate_transpose()).dot(terms)*2.0 - gradient).calculate_absolute_value() < 1.0e-3, LOG); }
void MeanSquaredErrorTest::test_calculate_Jacobian_terms(void) { message += "test_calculate_Jacobian_terms\n"; NumericalDifferentiation nd; NeuralNetwork nn; Vector<unsigned> multilayer_perceptron_architecture; Vector<double> parameters; DataSet ds; MeanSquaredError mse(&nn, &ds); Vector<double> objective_gradient; Vector<double> evaluation_terms; Matrix<double> terms_Jacobian; Matrix<double> numerical_Jacobian_terms; // Test nn.set(1, 1); nn.initialize_parameters(0.0); ds.set(1, 1, 1); ds.initialize_data(0.0); terms_Jacobian = mse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test nn.set(3, 4, 2); nn.initialize_parameters(0.0); ds.set(3, 2, 5); mse.set(&nn, &ds); ds.initialize_data(0.0); terms_Jacobian = mse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test multilayer_perceptron_architecture.set(3); multilayer_perceptron_architecture[0] = 2; multilayer_perceptron_architecture[1] = 1; multilayer_perceptron_architecture[2] = 2; nn.set(multilayer_perceptron_architecture); nn.initialize_parameters(0.0); ds.set(2, 2, 5); mse.set(&nn, &ds); ds.initialize_data(0.0); terms_Jacobian = mse.calculate_terms_Jacobian(); assert_true(terms_Jacobian.get_rows_number() == ds.get_instances().count_training_instances_number(), LOG); assert_true(terms_Jacobian.get_columns_number() == nn.count_parameters_number(), LOG); assert_true(terms_Jacobian == 0.0, LOG); // Test nn.set(1, 1, 1); nn.randomize_parameters_normal(); parameters = nn.arrange_parameters(); ds.set(1, 1, 1); ds.randomize_data_normal(); terms_Jacobian = mse.calculate_terms_Jacobian(); 
numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters); assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2, 2, 2); nn.randomize_parameters_normal(); parameters = nn.arrange_parameters(); ds.set(2, 2, 2); ds.randomize_data_normal(); terms_Jacobian = mse.calculate_terms_Jacobian(); numerical_Jacobian_terms = nd.calculate_Jacobian(mse, &MeanSquaredError::calculate_terms, parameters); assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2, 2, 2); nn.randomize_parameters_normal(); ds.set(2, 2, 2); ds.randomize_data_normal(); objective_gradient = mse.calculate_gradient(); evaluation_terms = mse.calculate_terms(); terms_Jacobian = mse.calculate_terms_Jacobian(); assert_true(((terms_Jacobian.calculate_transpose()).dot(evaluation_terms)*2.0 - objective_gradient).calculate_absolute_value() < 1.0e-3, LOG); }
// Checks NeuralNetwork::calculate_Jacobian(): zero-initialized networks must
// yield an all-zero Jacobian, and the analytical Jacobian must agree with
// numerical differentiation (those checks only run when the fixture's
// numerical_differentiation_tests flag is enabled). The scaling/unscaling
// and conditions checks below are disabled legacy code kept for reference.
void NeuralNetworkTest::test_calculate_Jacobian(void)
{
   message += "test_calculate_Jacobian\n";

   // One layer

   NeuralNetwork nn;
   Vector<unsigned> multilayer_perceptron_architecture;
   Vector<double> inputs;
   Matrix<double> Jacobian;

//   Vector<double> inputs_minimum;
//   Vector<double> inputs_maximum;

//   Vector<double> inputs_mean;
//   Vector<double> inputs_standard_deviation;

//   Vector<double> outputs_minimum;
//   Vector<double> outputs_maximum;

//   Vector<double> outputs_mean;
//   Vector<double> outputs_standard_deviation;

//   mmlp.set_display(false);

   NumericalDifferentiation nd;
   Matrix<double> numerical_Jacobian;

   // Test: all parameters zero -> all output derivatives are zero

   nn.set(1, 1, 1);
   nn.initialize_parameters(0.0);

   inputs.set(1, 0.0);

   Jacobian = nn.calculate_Jacobian(inputs);
   assert_true(Jacobian == 0.0, LOG);

   // Test: same property on a larger network

   nn.set(3, 4, 2);
   nn.initialize_parameters(0.0);

   inputs.set(3, 0.0);

   Jacobian = nn.calculate_Jacobian(inputs);
   assert_true(Jacobian == 0.0, LOG);

   // Test: analytical Jacobian versus numerical differentiation

   if (numerical_differentiation_tests)
   {
      nn.set(3, 4, 2);
      nn.initialize_parameters(0.0);

      inputs.set(3, 0.0);

      Jacobian = nn.calculate_Jacobian(inputs);
      numerical_Jacobian = nd.calculate_Jacobian(nn, &NeuralNetwork::calculate_outputs, inputs);

      assert_true( (Jacobian - numerical_Jacobian).calculate_absolute_value() < 1.0e-3, LOG);
   }

   // Test: architecture built from a vector
   // (set(3, 1) presumably yields three layers of size 1 — confirm Vector::set(n, value) semantics)

   multilayer_perceptron_architecture.set(3, 1);
   nn.set(multilayer_perceptron_architecture);
   nn.initialize_parameters(0.0);

   inputs.set(1, 0.0);

   Jacobian = nn.calculate_Jacobian(inputs);
   assert_true(Jacobian == 0.0, LOG);

   // Test: explicit {3,4,1} architecture, all parameters zero

   multilayer_perceptron_architecture.set(3);
   multilayer_perceptron_architecture[0] = 3;
   multilayer_perceptron_architecture[1] = 4;
   multilayer_perceptron_architecture[2] = 1;

   nn.set(multilayer_perceptron_architecture);
   nn.initialize_parameters(0.0);

   inputs.set(3, 0.0);

   Jacobian = nn.calculate_Jacobian(inputs);
   assert_true(Jacobian == 0.0, LOG);

   // Test: non-trivial inputs versus numerical differentiation
   // (parameters are left at whatever nn.set() produces — presumably random; TODO confirm)

   if (numerical_differentiation_tests)
   {
      multilayer_perceptron_architecture.set(3);
      multilayer_perceptron_architecture[0] = 3;
      multilayer_perceptron_architecture[1] = 4;
      multilayer_perceptron_architecture[2] = 1;

      nn.set(multilayer_perceptron_architecture);

      inputs.set(3);
      inputs[0] = 0.0;
      inputs[1] = 1.0;
      inputs[2] = 2.0;

      Jacobian = nn.calculate_Jacobian(inputs);
      numerical_Jacobian = nd.calculate_Jacobian(nn, &NeuralNetwork::calculate_outputs, inputs);

      assert_true( (Jacobian - numerical_Jacobian).calculate_absolute_value() < 1.0e-3, LOG);
   }

   // Scaling and unscaling test (disabled: relies on an older scaling API)

//   if(numerical_differentiation_tests)
//   {
//      nn.set(2, 3);
//      nn.set_variables_scaling_method(NeuralNetwork::MinimumMaximum);

//      nn.set_input_minimum(0, -0.3);
//      nn.set_input_minimum(1, -0.2);

//      nn.set_input_maximum(0, 0.0);
//      nn.set_input_maximum(1, 0.1);

//      nn.set_output_minimum(0, -1.0);
//      nn.set_output_minimum(1, -4.1);
//      nn.set_output_minimum(2, -8.2);

//      nn.set_output_maximum(0, 1.0);
//      nn.set_output_maximum(1, 7.2);
//      nn.set_output_maximum(2, 6.0);

//      inputs.set(2);
//      inputs.randomize_normal();

//      Jacobian = nn.calculate_Jacobian(inputs);
//      numerical_Jacobian = nd.calculate_Jacobian(nn, &NeuralNetwork::calculate_outputs, inputs);

//      assert_true((Jacobian-numerical_Jacobian).calculate_absolute_value() < 1.0e-3, LOG);
//   }

   // Scaling and unscaling test (disabled: relies on an older scaling API)

//   if(numerical_differentiation_tests)
//   {
//      nn.set(2, 3);
//      nn.set_variables_scaling_method(NeuralNetwork::MeanStandardDeviation);

//      nn.set_input_mean(0, -0.3);
//      nn.set_input_mean(1, -0.2);

//      nn.set_input_standard_deviation(0, 0.2);
//      nn.set_input_standard_deviation(1, 0.1);

//      nn.set_output_mean(0, -1.0);
//      nn.set_output_mean(1, -4.1);
//      nn.set_output_mean(2, -8.2);

//      nn.set_output_standard_deviation(0, 1.0);
//      nn.set_output_standard_deviation(1, 7.2);
//      nn.set_output_standard_deviation(2, 6.0);

//      inputs.set(2);
//      inputs.randomize_normal();

//      Jacobian = nn.calculate_Jacobian(inputs);
//      numerical_Jacobian = nd.calculate_Jacobian(nn, &NeuralNetwork::calculate_outputs, inputs);

//      assert_true((Jacobian-numerical_Jacobian).calculate_absolute_value() < 1.0e-3, LOG);
//   }

   // Conditions test (disabled: references an old "mmlp" object)

//   mmlp.set(1, 1, 1);
//   mmlp.initialize_parameters(0.0);

//   inputs.set(1, 0.0);

//   Jacobian = mmlp.calculate_Jacobian(inputs);
//   assert_true(Jacobian == 0.0, LOG);

   // Conditions test

   // Lower and upper bounds test

   // Probabilistic postprocessing test
}
void NormalizedSquaredErrorTest::test_calculate_Jacobian_terms(void) { message += "test_calculate_Jacobian_terms\n"; NumericalDifferentiation nd; NeuralNetwork nn; Vector<int> hidden_layers_size; Vector<double> network_parameters; DataSet ds; NormalizedSquaredError nse(&nn, &ds); Vector<double> objective_gradient; Vector<double> evaluation_terms; Matrix<double> terms_Jacobian; Matrix<double> numerical_Jacobian_terms; // Test nn.set(1, 1); nn.randomize_parameters_normal(); network_parameters = nn.arrange_parameters(); ds.set(1, 1, 2); ds.randomize_data_normal(); terms_Jacobian = nse.calculate_terms_Jacobian(); numerical_Jacobian_terms = nd.calculate_Jacobian(nse, &NormalizedSquaredError::calculate_terms, network_parameters); assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2, 2, 2); nn.randomize_parameters_normal(); network_parameters = nn.arrange_parameters(); ds.set(2, 2, 2); ds.randomize_data_normal(); terms_Jacobian = nse.calculate_terms_Jacobian(); numerical_Jacobian_terms = nd.calculate_Jacobian(nse, &NormalizedSquaredError::calculate_terms, network_parameters); assert_true((terms_Jacobian-numerical_Jacobian_terms).calculate_absolute_value() < 1.0e-3, LOG); // Test nn.set(2,2,2); nn.randomize_parameters_normal(); ds.set(2,2,2); ds.randomize_data_normal(); objective_gradient = nse.calculate_gradient(); evaluation_terms = nse.calculate_terms(); terms_Jacobian = nse.calculate_terms_Jacobian(); assert_true(((terms_Jacobian.calculate_transpose()).dot(evaluation_terms)*2.0 - objective_gradient).calculate_absolute_value() < 1.0e-3, LOG); }