// Trains a single biased 1x1 convolutional layer (2 planes -> 2 filters) on
// the OR-gate truth table for 20 epochs of SGD, then checks the final
// square loss has dropped below 0.4.
TEST( testlogicaloperators, Convolve_1layerbiased_Or ) {
    cout << "Or, convolve" << endl;
    LogicalDataCreator ldc;
    ldc.applyOrGate();
    EasyCL *cl = EasyCL::createForFirstGpuOtherwiseCpu();
    NeuralNet *net = NeuralNet::maker(cl)->planes(2)->imageSize(1)->instance();
    net->addLayer( ConvolutionalMaker::instance()->numFilters(2)->filterSize(1)->biased(1) );
    net->addLayer( SquareLossMaker::instance() ); // fixed: removed stray extra ';'
    SGD *sgd = SGD::instance( cl, 0.1f, 0 ); // learning rate 0.1, momentum 0
    for( int epoch = 0; epoch < 20; epoch++ ) {
        net->epochMaker(sgd)->batchSize(4)->numExamples(4)->inputData(ldc.data)
            ->expectedOutputs(ldc.expectedOutput)->run( epoch );
        // print loss every 5 epochs so progress is visible without spamming
        if( epoch % 5 == 0 ) {
            cout << "Loss L " << net->calcLoss(ldc.expectedOutput) << endl;
        }
    }
    AccuracyHelper::printAccuracy( ldc.N, 2, ldc.labels, net->getOutput() );
    float loss = net->calcLoss(ldc.expectedOutput);
    cout << "loss, E, " << loss << endl;
    EXPECT_GE( 0.4f, loss ); // i.e. loss <= 0.4
    delete sgd;
    delete net;
    delete cl;
}
// Forward-propagates one 2x2 plane through a per-plane softmax layer and
// verifies that: the outputs lie in [0,1] and sum to 1; each output equals
// the analytic softmax of the inputs {0,1,3,2}; and the cross-entropy loss
// for every possible one-hot target equals -log(p_target).
TEST( testforward, softmax_byplane ) {
    EasyCL *cl = EasyCL::createForFirstGpuOtherwiseCpu();
    NeuralNet *net = NeuralNet::maker(cl)->imageSize(2)->planes(1)->instance();
    net->addLayer( SoftMaxMaker::instance()->perPlane() );
    net->setBatchSize( 1 );
    int imageSizeSquared = net->getLayer(0)->getOutputSize() * net->getLayer(0)->getOutputSize();
    float *input = new float[imageSizeSquared];
    input[0] = 0;
    input[1] = 1;
    input[2] = 3;
    input[3] = 2;
    net->forward( input );
    float const*output = net->getOutput();

    // outputs must form a probability distribution
    float sum = 0;
    for( int i = 0; i < imageSizeSquared; i++ ) {
        cout << "output[" << i << "]=" << output[i] << endl;
        sum += output[i];
        EXPECT_LE( 0, output[i] );
        EXPECT_GE( 1, output[i] );
    }
    EXPECT_FLOAT_NEAR( 1.0f, sum );

    // compare against the analytic softmax of the input values
    const float denominator = exp(0.0f) + exp(1.0f) + exp(3.0f) + exp(2.0f);
    EXPECT_FLOAT_NEAR( (float)( exp(0.0f) / denominator ), output[0] );
    EXPECT_FLOAT_NEAR( (float)( exp(1.0f) / denominator ), output[1] );
    EXPECT_FLOAT_NEAR( (float)( exp(3.0f) / denominator ), output[2] );
    EXPECT_FLOAT_NEAR( (float)( exp(2.0f) / denominator ), output[3] );

    // cross-entropy loss for a one-hot target t should be -log(output[t]);
    // same targets, in the same order, as the original checks
    float *expected = new float[imageSizeSquared];
    int targets[] = { 2, 0, 1, 3 };
    for( int t = 0; t < 4; t++ ) {
        int target = targets[t];
        memset( expected, 0, sizeof(float) * imageSizeSquared );
        expected[target] = 1;
        float loss = net->calcLoss( expected );
        cout << "loss " << loss << endl;
        EXPECT_LT( 0, loss );
        EXPECT_FLOAT_NEAR( - log(output[target]), loss );
    }

    delete[] input;
    delete[] expected;
    delete net;
    delete cl;
}
// Disabled test: trains a single UNbiased 1x1 convolutional layer on the
// AND-gate truth table (learning rate 4.0, 20 epochs) and requires all four
// examples to be classified correctly.
TEST( testlogicaloperators, DISABLED_Convolve_1layer_And_Nobias ) {
    cout << "And" << endl;
    LogicalDataCreator creator;
    creator.applyAndGate();
    EasyCL *context = EasyCL::createForFirstGpuOtherwiseCpu();
    NeuralNet *network = NeuralNet::maker(context)->planes(2)->imageSize(1)->instance();
    network->addLayer( ConvolutionalMaker::instance()->numFilters(2)->filterSize(1)->biased(0) );
    SGD *trainer = SGD::instance( context, 4.0f, 0 );
    for( int it = 0; it < 20; it++ ) {
        network->epochMaker(trainer)->batchSize(4)->numExamples(4)->inputData(creator.data)
            ->expectedOutputs(creator.expectedOutput)->run(it);
        cout << "Loss L " << network->calcLoss(creator.expectedOutput) << endl;
    }
    int numRight = AccuracyHelper::calcNumRight( creator.N, 2, creator.labels, network->getOutput() );
    cout << "accuracy: " << numRight << "/" << creator.N << endl;
    EXPECT_EQ( numRight, creator.N );
    delete trainer;
    delete network;
    delete context;
}
//layer2 plane0=0 "planes not both -1 and planes not both 1" // weights = plane0*(-1) + plane1*(-1) // plane1=1 "planes both -1 or planes both 1" // weights = plane0*(1) + plane1*(1) TEST( testlogicaloperators, Convolve_2layers_relu_Xor ) { cout << "Xor, convolve" << endl; // LogicalDataCreator ldc(new TanhActivation()); // ldc.applyXorGate(); // int imageSize = 1; // int inPlanes = 2; int numExamples = 4; // int filterSize = 1; float data[] = { -1, -1, -1, 1, 1, -1, 1, 1 }; float layer1weights[] = { // going to preset these, to near an optimal solution, // and at least show the network is stable, and gives the correct -0.4f,-0.55f, // result... 0.52f, 0.53f, }; float layer1bias[] = { 0.1f, -0.1f }; float layer2weights[] = { 1.1f, 0.9f, -0.8f, -1.2f }; float layer2bias[] = { 0.1f, 1.1 }; float expectedOutput[] = { 1, 0, 0, 1, 0, 1, 1, 0 }; int labels[] = { 0, 1, 1, 0 }; EasyCL *cl = EasyCL::createForFirstGpuOtherwiseCpu(); NeuralNet *net = NeuralNet::maker(cl)->planes(2)->imageSize(1)->instance(); net->addLayer( ConvolutionalMaker::instance()->numFilters(2)->filterSize(1)->biased(1) ); net->addLayer( ActivationMaker::instance()->relu() ); net->addLayer( ConvolutionalMaker::instance()->numFilters(2)->filterSize(1)->biased(1) ); net->addLayer( ActivationMaker::instance()->relu() ); net->addLayer( SquareLossMaker::instance() );; cout << "hand-setting weights..." 
<< endl; net->initWeights( 1, layer1weights, layer1bias ); net->initWeights( 3, layer2weights, layer2bias ); // net->printWeights(); // net->setBatchSize(4); // net->forward( data ); // net->print(); SGD *sgd = SGD::instance( cl, 0.1f, 0 ); for( int epoch = 0; epoch < 200; epoch++ ) { net->epochMaker(sgd)->batchSize(numExamples)->numExamples(numExamples)->inputData(data) ->expectedOutputs(expectedOutput)->run( epoch ); if( epoch % 5 == 0 ) cout << "Loss L " << net->calcLoss(expectedOutput) << endl; } net->print(); AccuracyHelper::printAccuracy( numExamples, 2, labels, net->getOutput() ); float loss = net->calcLoss(expectedOutput); cout << "loss, E, " << loss << endl; EXPECT_GE( 0.0000001f, loss ); delete sgd; delete net; delete cl; }