Example #1
TEST(TestNeuralNetwork, getWeights)
{
    // A {400, 25, 10} network stores one bias per neuron, so the weight
    // count is (400 + 1) * 25 + (25 + 1) * 10.
    NeuralNetwork nn {400, 25, 10};
    EXPECT_EQ(401 * 25 + 26 * 10, nn.getWeights().size());

    // A freshly constructed network starts with all weights at zero.
    for (auto w : nn.getWeights()) {
        EXPECT_FLOAT_EQ(0.0f, w);
    }
}
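
The tests in this section compile against a NeuralNetwork class that is not shown here. For orientation, the interface they exercise looks roughly like the sketch below; every name and signature is inferred from the call sites in the examples, not taken from the actual implementation.

#include <functional>
#include <initializer_list>
#include <vector>

// Hypothetical interface reconstructed from the test code. Signatures are
// assumptions; only the calls in the examples constrain them.
class NeuralNetwork {
public:
    // Layer sizes, e.g. {400, 25, 10}. Each layer pair contributes
    // (in + 1) * out weights because of the per-neuron bias.
    NeuralNetwork(std::initializer_list<int> layerSizes);

    const std::vector<float> &getWeights() const;
    void setWeights(const std::function<float()> &generator);

    void setInput(const std::vector<float> &input);
    void calc();                                     // forward pass
    float calcCost(const std::vector<float> &expected);
    void applyGradient(const std::vector<float> &gradient, float alpha);
};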
Example #2
TEST(TestNeuralNetwork, setWeights)
{
    NeuralNetwork nn {400, 25, 10};
    float curWeight = 0.0f;

    // The generator is called once per weight, so the weights become the
    // sequence 0, 1, 2, ... in storage order.
    nn.setWeights([&curWeight] { return curWeight++; });

    for (std::size_t i = 0; i < nn.getWeights().size(); ++i) {
        EXPECT_FLOAT_EQ(static_cast<float>(i), nn.getWeights()[i]);
    }
}
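
The counting lambda works because setWeights presumably pulls one value per weight from the generator, in the same order getWeights exposes them. A minimal free-standing sketch of that pattern; fillWeights is a hypothetical stand-in, not the real member function:

#include <functional>
#include <vector>

// Sketch of the generator-fill pattern setWeights appears to use:
// one generated value per weight, in storage order.
static void fillWeights(std::vector<float> &weights, const std::function<float()> &generator)
{
    for (float &w : weights)
        w = generator();
}

Applied to the test's counting lambda, this leaves weights[i] == i, which is exactly what the verification loop checks.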
Example #3
static void testConvergence(NeuralNetwork &nn,
                            void (NeuralNetwork::*algorithm)(const std::vector<float> &, std::vector<float> &),
                            bool (*sampleGenerator)(int, std::vector<float> &, std::vector<float> &))
{
    std::vector<float> input;
    std::vector<float> expected;
    std::vector<float> gradient;
    const float alpha = 0.2f;

    float cost = 0.0f;
    float prevCost = 0.0f;

    input.resize(2);
    expected.resize(2);

    int sampleId = 0;

    for (int i = 0; i < 128; ++i) {
        // The generator fills input/expected and reports whether this
        // sample is the last one in the current batch.
        bool lastInBatch = sampleGenerator(sampleId++, input, expected);

        nn.setInput(input);
        nn.calc();

        // The algorithm under test accumulates into gradient and is
        // expected to size it to one entry per weight.
        (nn.*algorithm)(expected, gradient);
        ASSERT_EQ(nn.getWeights().size(), gradient.size());
        cost += nn.calcCost(expected);

        if (lastInBatch) {
            nn.applyGradient(gradient, alpha);
            gradient.assign(gradient.size(), 0.0f);  // reset the accumulator for the next batch

            // After the first batch, the accumulated cost must strictly
            // decrease from one batch to the next.
            if (prevCost != 0.0f)
                ASSERT_LT(cost, prevCost);

            prevCost = cost;
            cost = 0.0f;
        }
    }
}
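
testConvergence is a helper rather than a test, so a caller has to supply the network, the training algorithm, and a sample generator. The sketch below shows one plausible call site; the sample set (learning the identity mapping on two inputs, in batches of four) and the member name backpropagate are assumptions made for illustration, since the section does not show the real caller.

// Hypothetical sample generator: cycle through the four 2-bit input
// patterns and ask the network to reproduce them. Every fourth sample
// closes a batch, which is what testConvergence keys on.
static bool identitySamples(int sampleId, std::vector<float> &input, std::vector<float> &expected)
{
    input[0] = (sampleId & 1) ? 1.0f : 0.0f;
    input[1] = (sampleId & 2) ? 1.0f : 0.0f;
    expected = input;
    return sampleId % 4 == 3;
}

TEST(TestNeuralNetwork, backpropConverges)
{
    NeuralNetwork nn {2, 4, 2};
    nn.setWeights([] { return 0.1f; });  // small non-zero starting weights
    // NeuralNetwork::backpropagate is an assumed name for the gradient
    // algorithm; substitute whatever member matches the pointer signature.
    testConvergence(nn, &NeuralNetwork::backpropagate, identitySamples);
}

Whether this particular configuration actually converges depends on the real implementation; the sketch only shows how the pieces plug together.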