/// Constructs a standard two-layer feed-forward network:
/// one hidden layer (no_inputs -> no_hidden) and one output layer
/// (no_hidden -> no_outputs), each with its own activation function.
/// Layers are registered in forward-pass order via add_layer().
NeuralNet::NeuralNet(size_t no_inputs, size_t no_hidden, size_t no_outputs,
                     ActivationFunc& hidden_act, ActivationFunc& output_act) {
    NNLayer hidden(no_inputs, no_hidden, hidden_act);
    NNLayer output(no_hidden, no_outputs, output_act);
    add_layer(hidden);
    add_layer(output);
}
//------------------ANN construction------------------------------------------ void ANN (int N,float In, float * w_ji, float * w_kj, float b, float * y_pj,float & y_pk) { float x_pi; input_layer(In,x_pi); hidden_layer(N,x_pi, w_ji, y_pj); output_layer(N,y_pj,w_kj,b,y_pk); }
void TestHiddenLayer_Backward() { double epsilon = 1e-3; int n_outputs = 2; int n_inputs = 3; HiddenLayer hidden_layer(n_inputs); OutputLayer output_layer(n_outputs); std::vector<double> hidden_values; hidden_values.push_back(-2.0); hidden_values.push_back(1.0); hidden_values.push_back(3.0); WeightMatrix weights(hidden_layer, output_layer); weights.set(0, 0, 0.5); weights.set(1, 0, -2.0); weights.set(2, 0, 1.5); weights.set(0, 1, 1.0); weights.set(1, 1, 0.7); weights.set(2, 1, -1.0); weights.setBias(0, 0.8); weights.setBias(1, -0.3); hidden_layer.receiveInput(hidden_values); std::vector<double> transition = weights.fire(hidden_layer); output_layer.receiveInput(transition); assert(output_layer.getInput(0) == 3.3); assert(output_layer.getInput(1) == -2.6); // backpropagation step std::vector<double> actual_outputs; actual_outputs.push_back(1.0); actual_outputs.push_back(1.0); output_layer.computeGradient(actual_outputs); weights.computeGradient(hidden_layer, output_layer); hidden_layer.computeGradient(weights, output_layer); assert(hidden_layer.getPartialDerivative(0) < 0.0 + epsilon && hidden_layer.getPartialDerivative(0) > 0.0 - epsilon); assert(hidden_layer.getPartialDerivative(1) < -3.56 + epsilon && hidden_layer.getPartialDerivative(1) > -3.56 - epsilon); assert(hidden_layer.getPartialDerivative(2) < 3.525 + epsilon && hidden_layer.getPartialDerivative(2) > 3.525 - epsilon); printPass("TestHiddenLayer_Backward()"); }