Example #1
TEST(TestNeuralNetwork, testIfBackpropGradientIsEqToNumerical)
{
    NeuralNetwork nn {2, 2};
    vector<float> weights {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};

    vector<float> input;
    vector<float> expected;
    vector<float> backpropGradient;
    vector<float> numericalGradient;

    int iter = 0;
    bool lastBatch;

    input.resize(2);
    expected.resize(2);

    nn.setWeights(weights);
    nn.setActivationFunction(ActivationFunction::sigmoid());

    do {
        lastBatch = createAndOrSample(iter++, input, expected);
        nn.setInput(input);
        nn.calc();
        nn.calcNumericalGradient(expected, numericalGradient);
        nn.calcBackpropagationGradient(expected, backpropGradient);

        ASSERT_EQ(numericalGradient.size(), backpropGradient.size());
        for (size_t i = 0; i < numericalGradient.size(); ++i) {
            EXPECT_NEAR(numericalGradient[i], backpropGradient[i], 0.0001f);
            // reset the accumulated gradients before the next sample
            numericalGradient[i] = 0.0f;
            backpropGradient[i] = 0.0f;
        }
    } while(!lastBatch);
}
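
The createAndOrSample helper used above is not part of the excerpt. A minimal sketch of what it could look like, assuming the two outputs are the logical AND and OR of two binary inputs and that a batch consists of the four input combinations (only the signature is taken from the call site; the body is illustrative):

static bool createAndOrSample(int id, std::vector<float> &input, std::vector<float> &expected)
{
    // cycle through the four binary input combinations
    const int sample = id % 4;
    const float a = static_cast<float>(sample & 1);
    const float b = static_cast<float>((sample >> 1) & 1);

    input[0] = a;
    input[1] = b;
    expected[0] = a * b;                          // a AND b
    expected[1] = (a + b > 0.0f) ? 1.0f : 0.0f;   // a OR b
    return sample == 3;                           // true on the last sample of the batch
}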
Example #2
TEST(TestNeuralNetwork, zeroInputAndWeights)
{
    NeuralNetwork nn {2, 2};
    vector<float> input {0.0f, 0.0f};
    vector<float> expected = {0.0f, 0.0f};
    vector<float> output;

    nn.setWeights([]() {
        return 0.0f;
    });
    nn.setInput(input);
    nn.calc();
    nn.getOutput(output);

    ASSERT_EQ(expected.size(), output.size());
    for (size_t i = 0; i < expected.size(); ++i) {
        EXPECT_EQ(expected[i], output[i]);
    }
    EXPECT_EQ(0, nn.calcCost(expected));
}
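
Because every weight and bias is zeroed, each neuron's pre-activation sum is 0 regardless of the input, so the output vector is all zeros and the cost against an all-zero target is exactly 0, which is why the exact EXPECT_EQ comparisons are safe here despite using floats.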
Example #3
TEST(TestNeuralNetwork, feedForward)
{
    NeuralNetwork nn {4, 3, 2};
    float curWeight = 1.0f;
    vector<float> input {0.1f, 0.2f, 0.3f, 0.4f};
    vector<float> expected {957.0f, 1177.0f};
    vector<float> output;

    nn.setWeights([&curWeight] {return curWeight++;});

    // 23 weights consumed in total: (4*3 + 3) + (3*2 + 2), so curWeight advanced from 1 to 24
    ASSERT_EQ(24, curWeight);

    nn.setInput(input);
    nn.calc();
    nn.getOutput(output);

    ASSERT_EQ(expected.size(), output.size());
    for (size_t i = 0; i < expected.size(); ++i)
        EXPECT_FLOAT_EQ(expected[i], output[i]);
}
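
The expected outputs can be verified by hand. The asserted values are reproduced exactly if setWeights hands the values out neuron by neuron, four input weights followed by a bias for each hidden neuron and three weights plus a bias for each output neuron, with no squashing activation applied: the hidden layer yields 1*0.1 + 2*0.2 + 3*0.3 + 4*0.4 + 5 = 8, then 18 and 28 for the remaining two neurons, and the output layer yields 16*8 + 17*18 + 18*28 + 19 = 957 and 20*8 + 21*18 + 22*28 + 23 = 1177.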
Example #4
TEST(TestNeuralNetwork, activationFunction)
{
    int timesActivCalled = 0;
    // activation: counts invocations and returns a constant 1.0f
    ActivationFunction af([&timesActivCalled](float) {
        timesActivCalled++;
        return 1.0f;
    }, [](float) {
        // derivative: irrelevant here, the test never computes gradients
        return 0.0f;
    });
    NeuralNetwork nn {2, 3, 3, 2};
    vector<float> expected = {1.0f, 1.0f};
    vector<float> output;

    nn.setActivationFunction(af);
    nn.calc();
    nn.getOutput(output);

    ASSERT_EQ(expected.size(), output.size());
    for (size_t i = 0; i < expected.size(); ++i)
        EXPECT_FLOAT_EQ(expected[i], output[i]);
    EXPECT_EQ(8, timesActivCalled);
}
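
The final assertion pins down how often the activation function runs: once per neuron in the hidden and output layers, 3 + 3 + 2 = 8 for the {2, 3, 3, 2} topology, confirming that calc() does not apply the activation to the input layer.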
Example #5
static void testConvergence(NeuralNetwork &nn,
                            void (NeuralNetwork::*algorithm)(const std::vector<float> &, std::vector<float> &),
                            bool (*sampleGenerator)(int, std::vector<float> &, std::vector<float> &))
{
    vector<float> input;
    vector<float> expected;
    vector<float> gradient;
    const float alpha = 0.2f;

    float cost = 0.0f;
    float prevCost = 0.0f;

    input.resize(2);
    expected.resize(2);

    int sampleId = 0;

    for (int i = 0; i < 128; ++i) {
        bool lastInBatch = sampleGenerator(sampleId++, input, expected);

        nn.setInput(input);
        nn.calc();

        (nn.*algorithm)(expected, gradient);
        ASSERT_EQ(nn.getWeights().size(), gradient.size());
        cost += nn.calcCost(expected);

        if (lastInBatch) {
            nn.applyGradient(gradient, alpha);
            for (size_t w = 0; w < gradient.size(); ++w)
                gradient[w] = 0.0f;

            // the summed batch cost must strictly decrease after every weight
            // update; the first batch is skipped because there is no baseline yet
            if (prevCost != 0.0f)
                ASSERT_LT(cost - prevCost, 0.0f);

            prevCost = cost;
            cost = 0.0f;
        }
    }
}
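
The TEST cases that drive testConvergence are not shown in the excerpt. A minimal sketch of how it might be invoked, reusing the createAndOrSample generator from Example #1 (the test names are illustrative, not taken from the source):

TEST(TestNeuralNetwork, backpropGradientConverges)
{
    NeuralNetwork nn {2, 2};
    vector<float> weights {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};

    nn.setWeights(weights);
    nn.setActivationFunction(ActivationFunction::sigmoid());
    testConvergence(nn, &NeuralNetwork::calcBackpropagationGradient, createAndOrSample);
}

TEST(TestNeuralNetwork, numericalGradientConverges)
{
    NeuralNetwork nn {2, 2};
    vector<float> weights {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};

    nn.setWeights(weights);
    nn.setActivationFunction(ActivationFunction::sigmoid());
    testConvergence(nn, &NeuralNetwork::calcNumericalGradient, createAndOrSample);
}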