Example No. 1
TEST(TestNeuralNetwork, costFunction)
{
    int called = 0;
    // Stub cost: ignore the actual output, count invocations, and return
    // a recognizable multiple of the expected value.
    CostFunction c([&called](float, float expected) {
        called++;
        return expected * 64.0f;
    });

    NeuralNetwork nn {2, 2};
    vector<float> expected {1.0f, 0.5f};

    nn.setCostFunction(c);

    // calcCost() should evaluate the stub once per output:
    // 1.0f * 64 + 0.5f * 64 = 96.
    EXPECT_FLOAT_EQ((64.0f + 32.0f), nn.calcCost(expected));
    EXPECT_EQ(2, called);
}
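
The test pins down only the call shape of CostFunction: it is constructible from a callable taking the actual and expected output values and returning a float, and calcCost() applies it once per output and sums the results. A minimal sketch of a type with that shape (an assumption inferred from the test, not the project's actual definition):

#include <functional>
#include <utility>

// Sketch only: a thin wrapper over a per-output cost callable, inferred
// from how the test above uses CostFunction.
class CostFunction
{
public:
    using Fn = std::function<float(float actual, float expected)>;

    explicit CostFunction(Fn fn) : fn_(std::move(fn)) {}

    float operator()(float actual, float expected) const
    {
        return fn_(actual, expected);
    }

private:
    Fn fn_;
};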
Example No. 2
TEST(TestNeuralNetwork, zeroInputAndWeights)
{
    NeuralNetwork nn {2, 2};
    vector<float> input {1.0f, 1.0f};
    vector<float> expected = {0.0f, 0.0f};
    vector<float> output;

    // With every weight forced to zero, the outputs must be zero no matter
    // what input is fed in.
    nn.setWeights([]() {
        return 0.0f;
    });
    nn.setInput(input);
    nn.calc();
    nn.getOutput(output);

    ASSERT_EQ(expected.size(), output.size());
    for (size_t i = 0; i < expected.size(); ++i) {
        EXPECT_EQ(expected[i], output[i]);
    }
    EXPECT_FLOAT_EQ(0.0f, nn.calcCost(expected));
}
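
The zero-initialization above suggests that setWeights() invokes the supplied functor once per weight. Under that assumption (the snippet does not confirm it), the same hook can seed random initial weights, e.g.:

#include <random>

// Sketch only: seed every weight with a pseudo-random value, assuming
// setWeights() calls the functor once per weight.
std::mt19937 rng{42};
std::uniform_real_distribution<float> dist{-0.5f, 0.5f};

NeuralNetwork nn {2, 2};
nn.setWeights([&rng, &dist]() {
    return dist(rng);
});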
Example No. 3
// Runs 128 training samples through `nn` and asserts that the accumulated
// cost strictly decreases from one batch to the next.  `algorithm` is the
// gradient-accumulating member under test; `sampleGenerator` fills
// input/expected for the given sample id and returns true for the last
// sample of a batch.
static void testConvergence(
    NeuralNetwork &nn,
    void (NeuralNetwork::*algorithm)(const std::vector<float> &, std::vector<float> &),
    bool (*sampleGenerator)(int, std::vector<float> &, std::vector<float> &))
{
    vector<float> input;
    vector<float> expected;
    vector<float> gradient;
    const float alpha = 0.2f;

    float cost = 0.0f;
    float prevCost = 0.0f;

    input.resize(2);
    expected.resize(2);

    int sampleId = 0;

    for (int i = 0; i < 128; ++i) {
        bool lastInBatch = sampleGenerator(sampleId++, input, expected);

        nn.setInput(input);
        nn.calc();

        // Accumulate this sample's gradient and cost into the batch totals.
        (nn.*algorithm)(expected, gradient);
        ASSERT_EQ(nn.getWeights().size(), gradient.size());
        cost += nn.calcCost(expected);

        if (lastInBatch) {
            nn.applyGradient(gradient, alpha);
            for (float &g : gradient)
                g = 0.0f;

            // Each batch must strictly lower the cost.  The comparison is
            // skipped for the first batch, before a previous cost exists.
            if (prevCost != 0.0f)
                ASSERT_LT(cost - prevCost, 0.0f);

            prevCost = cost;
            cost = 0.0f;
        }
    }
}
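
The helper leaves both the training algorithm and the data set to the caller. A minimal sketch of how it might be invoked, assuming a hypothetical NeuralNetwork::backpropagate member that matches the algorithm pointer's signature and a two-sample batch:

// Hypothetical sample generator: alternates between two training pairs
// and returns true on the second one, which closes the batch.
static bool twoSampleBatch(int id, std::vector<float> &input, std::vector<float> &expected)
{
    if (id % 2 == 0) {
        input = {0.0f, 1.0f};
        expected = {1.0f, 0.0f};
        return false;   // first sample of the batch
    }
    input = {1.0f, 0.0f};
    expected = {0.0f, 1.0f};
    return true;        // last sample: the gradient gets applied
}

TEST(TestNeuralNetwork, convergesOnTwoSamples)
{
    NeuralNetwork nn {2, 2};
    // `backpropagate` is an assumed name; substitute whichever member of
    // NeuralNetwork computes and accumulates the gradient.
    testConvergence(nn, &NeuralNetwork::backpropagate, twoSampleBatch);
}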