// Perf benchmark: kgsgo fully-connected-style layer — the 19x19 filter covers
// the entire 19x19 input plane; 32 planes in, 500 filters out, no pad, biased.
TEST( SLOW_testforward, perf_kgsgo_fc500 ) {
    const int batchSize = 128;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputSize( 19 )
       .setNumFilters( 500 ).setFilterSize( 19 )
       .setPadZeros( false ).setBiased( true );
    // instance -1: presumably lets testPerf choose the implementation — confirm
    testPerf( -1, 128, batchSize, dim );
}
// Perf benchmark: mnist-style final layer — 28x28 filter over the whole 28x28
// input, 32 planes in, 10 filters out (one per class), no padding, biased.
TEST( SLOW_testforward, perf_mnist_finallayer ) {
    const int batchSize = 128;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputSize( 28 )
       .setNumFilters( 10 ).setFilterSize( 28 )
       .setPadZeros( false ).setBiased( true );
    testPerf( -1, 128, batchSize, dim );
}
// Perf benchmark: mnist-style intermediate layer (5x5 filters, padded) with a
// large 1024-example batch.
TEST( SLOW_testforward, perf_mnist_intlayers_1024ex ) {
    const int batchSize = 1024;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputSize( 28 )
       .setNumFilters( 32 ).setFilterSize( 5 )
       .setPadZeros( true ).setBiased( true );
    // NOTE(review): N here (128) is smaller than batchSize (1024) — presumably
    // testPerf clamps the batch to N; confirm that is the intent.
    testPerf( -1, 128, batchSize, dim );
}
// Perf benchmark (propagate API variant): mnist-style final layer with a relu
// activation; 28x28 filter over 28x28 input, 10 output filters, no pad, biased.
TEST( SLOW_testpropagate, perf_mnist_finallayer ) {
    const int batchSize = 128;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputImageSize( 28 )
       .setNumFilters( 10 ).setFilterSize( 28 )
       .setPadZeros( false ).setBiased( true );
    // NOTE(review): activation allocated with new — presumably testPerf takes
    // ownership; confirm it frees the object.
    testPerf( -1, 128, batchSize, dim, new ReluActivation() );
}
// Perf benchmark (propagate API variant): kgsgo fully-connected-style layer
// with a tanh activation; 19x19 filter over 19x19 input, 500 filters, no pad.
TEST( SLOW_testpropagate, perf_kgsgo_fc500 ) {
    const int batchSize = 128;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputImageSize( 19 )
       .setNumFilters( 500 ).setFilterSize( 19 )
       .setPadZeros( false ).setBiased( true );
    // NOTE(review): activation allocated with new — presumably testPerf takes
    // ownership; confirm it frees the object.
    testPerf( -1, 128, batchSize, dim, new TanhActivation() );
}
// Command-line-driven comparison of two propagate implementations (defaults:
// instance 1 vs 3) on a kgsgo-like padded 64-plane layer. Dimensions, N,
// batch size, instances, debug flag and activation are all overridable via
// test args.
TEST( SLOW_testpropagate, compare_args ) {
    LayerDimensions dim;
    int batchSize = 128;
    int instance0 = 1;
    int instance1 = 3;
    int N = 128;
    bool debug = false;
    string activationName = "tanh";
    dim.setInputPlanes( 64 ).setInputImageSize( 19 )
       .setNumFilters( 64 ).setFilterSize( 7 )
       .setPadZeros( true ).setBiased( false );
    TestArgsParser::arg( "n", &N );
    DimFromArgs::arg( &dim );
    TestArgsParser::arg( "instance0", &instance0 );
    TestArgsParser::arg( "instance1", &instance1 );
    TestArgsParser::arg( "debug", &debug );
    TestArgsParser::arg( "batchsize", &batchSize );
    TestArgsParser::arg( "activation", &activationName );
    TestArgsParser::go();
    // Args may have changed the base dims, so recompute the derived ones.
    dim.deriveOthers();
    ActivationFunction *fn = ActivationFunction::fromName( activationName );
    compareSpecific( debug, N, batchSize, dim, fn, instance0, instance1 );
}
// Compare forward implementation 1 against implementations 2..7 on a padded,
// biased 8-plane 19x19 layer. Instance 5 (forwardfc) is skipped because it
// cannot handle inputimagesize != filtersize; instances 2 and 3 fall back to
// a 15x15 input when the device workgroup cannot cover a full 19x19 plane.
// Fix vs original: removed the unused local `activationName` (compareSpecific
// here takes no activation) and merged the two identical workgroup-size checks.
TEST( testforward, compare_1_n_biased_pad ) {
    // Query the device once, up front, for its workgroup-size limit.
    EasyCL *cl = EasyCL::createForFirstGpuOtherwiseCpu();
    int maxWorkgroupSize = cl->getMaxWorkgroupSize();
    delete cl;
    LayerDimensions dim;
    int batchSize = 4;
    int N = 4;
    dim.setInputPlanes( 8 ).setInputSize(19).setNumFilters( 8 )
        .setFilterSize( 5 )
        .setPadZeros( true ).setBiased( true );
    for( int instance = 2; instance <= 7; instance++ ) {
        if( instance == 5 ) {
            continue; // forwardfc, cant use for inputimagesize != filtersize
        }
        // Reset each iteration: a previous iteration may have shrunk it.
        dim.setInputSize(19);
        if( ( instance == 2 || instance == 3 ) && maxWorkgroupSize < 19 * 19 ) {
            dim.setInputSize(15);
        }
        cout << "instance: " << instance << endl;
        compareSpecific( false, N, batchSize, dim, 1, instance );
    }
}
// fc-like scenario: the 24x24 filter covers the entire 24x24 input. Only the
// nopad case is tested, since the fc implementation cannot handle padding.
TEST( testforward, compare_1_4_fcscenario ) {
    LayerDimensions dim;
    const int batchSize = 4;
    const int N = 4;
    dim.setInputPlanes( 10 ).setInputSize( 24 )
       .setNumFilters( 10 ).setFilterSize( 24 )
       .setPadZeros( false ).setBiased( true );
    compareSpecific( false, N, batchSize, dim, 1, 4 );
}
// Backward-pass perf benchmark on a kgsgo-style 32-plane, 5x5-filter,
// padded, biased layer, using implementation instance 2.
TEST(SLOW_testbackward, perf_kgsgo_32c5) {
    const int batchSize = 128;
    LayerDimensions dim;
    dim.setInputPlanes(32).setInputSize(19)
       .setNumFilters(32).setFilterSize(5)
       .setPadZeros(true).setBiased(true);
    cout << dim.buildOptionsString() << endl;
    measurePerf(2, batchSize, dim);
}
// Compare forward implementations 1 and 5 on a biased fc-shaped layer
// (filter size == input size). Only nopad: fc cannot handle zero-padding.
TEST( testforward, compare_1_5_biased_nopad ) {
    LayerDimensions dim;
    const int batchSize = 4;
    const int N = 4;
    dim.setInputPlanes( 8 ).setInputSize( 19 )
       .setNumFilters( 8 ).setFilterSize( 19 )
       .setPadZeros( false ).setBiased( true );
    compareSpecific( false, N, batchSize, dim, 1, 5 );
}
// Tiny backward-pass comparison (instances 1 vs 2) on a minimal 2-plane,
// 3x3 padded, biased layer — small enough to debug by hand.
TEST(SLOW_testbackward, compare_kgsgo_32c5mini) {
    const int batchSize = 4;
    LayerDimensions dim;
    dim.setInputPlanes(2).setInputSize(3)
       .setNumFilters(2).setFilterSize(3)
       .setPadZeros(true).setBiased(true);
    cout << dim.buildOptionsString() << endl;
    compareSpecific(1, 2, 1, batchSize, dim);
}
// Propagate-API variant of the fc scenario: 24x24 filter over 24x24 input,
// tanh activation, instances 1 vs 4. Only nopad — fc cannot handle padding.
TEST( testpropagate, compare_1_4_fcscenario ) {
    LayerDimensions dim;
    const int batchSize = 4;
    const int N = 4;
    string activationName = "tanh";
    dim.setInputPlanes( 10 ).setInputImageSize( 24 )
       .setNumFilters( 10 ).setFilterSize( 24 )
       .setPadZeros( false ).setBiased( true );
    ActivationFunction *fn = ActivationFunction::fromName( activationName );
    compareSpecific( false, N, batchSize, dim, fn, 1, 4 );
}
// Compare forward implementations 0 and 1 on a small padded, biased layer.
// Fix vs original: removed the unused local `activationName` — this
// compareSpecific overload takes no activation function.
TEST( testforward, compare_0_1_biased_pad ) {
    LayerDimensions dim;
    int batchSize = 4;
    int N = 4;
    dim.setInputPlanes( 8 ).setInputSize(19).setNumFilters( 8 )
        .setFilterSize( 5 )
        .setPadZeros( true ).setBiased( true );
    compareSpecific( false, N, batchSize, dim, 0, 1 );
}
// Soumith benchmark layer 2: 64 planes of 64x64 in, 128 filters of 9x9, no
// padding. Implementation instance and biased flag overridable via test args.
TEST( SLOW_testforward, soumith2 ) {
    const int batchSize = 128;
    int instance = 4;
    bool biased = true;
    TestArgsParser::arg( "instance", &instance );
    TestArgsParser::arg( "biased", &biased );
    TestArgsParser::go();
    LayerDimensions dim;
    dim.setInputPlanes( 64 ).setInputSize( 64 )
       .setNumFilters( 128 ).setFilterSize( 9 )
       .setPadZeros( false ).setBiased( biased );
    testPerf( instance, 128, batchSize, dim );
}
// Backward-pass cross-check on a kgsgo-style layer: first instance 0 vs 1,
// then instance 1 against every remaining registered implementation.
TEST(testbackward, compare_1_n_kgsgo_32c5) {
    const int batchSize = 8;
    LayerDimensions dim;
    dim.setInputPlanes(32).setInputSize(19)
       .setNumFilters(32).setFilterSize(5)
       .setPadZeros(true).setBiased(true);
    cout << dim.buildOptionsString() << endl;
    compareSpecific(0, 1, 1, batchSize, dim);
    for(int impl = 2; impl < Backward::getNumImplementations(); impl++) {
        compareSpecific(1, impl, 1, batchSize, dim);
    }
}
// Propagate-API variant: compare implementations 0 and 1 on a small padded,
// biased layer with a tanh activation.
TEST( testpropagate, compare_0_1_biased_pad ) {
    LayerDimensions dim;
    const int batchSize = 4;
    const int N = 4;
    string activationName = "tanh";
    dim.setInputPlanes( 8 ).setInputImageSize( 19 )
       .setNumFilters( 8 ).setFilterSize( 5 )
       .setPadZeros( true ).setBiased( true );
    ActivationFunction *fn = ActivationFunction::fromName( activationName );
    compareSpecific( false, N, batchSize, dim, fn, 0, 1 );
}
// Propagate-API variant: compare implementations 1 and 5 on a biased
// fc-shaped layer (filter size == input size), tanh activation. Only nopad
// is tested, since fc cannot handle zero-padding.
TEST( testpropagate, compare_1_5_biased_nopad ) {
    LayerDimensions dim;
    const int batchSize = 4;
    const int N = 4;
    string activationName = "tanh";
    dim.setInputPlanes( 8 ).setInputImageSize( 19 )
       .setNumFilters( 8 ).setFilterSize( 19 )
       .setPadZeros( false ).setBiased( true );
    ActivationFunction *fn = ActivationFunction::fromName( activationName );
    compareSpecific( false, N, batchSize, dim, fn, 1, 5 );
}
// Perf benchmark on a kgsgo-style 64-plane, 7x7-filter padded layer.
// Instance, N, batch size, and dimensions are all overridable via test args.
TEST( SLOW_testforward, perf_kgsgo_64c7_args ) {
    int instance = 3;
    int batchSize = 128;
    int N = 1000;
    LayerDimensions dim;
    dim.setInputPlanes( 64 ).setInputSize( 19 )
       .setNumFilters( 64 ).setFilterSize( 7 )
       .setPadZeros( true ).setBiased( true );
    DimFromArgs::arg( &dim );
    TestArgsParser::arg( "instance", &instance );
    TestArgsParser::arg( "n", &N );
    TestArgsParser::arg( "batchsize", &batchSize );
    TestArgsParser::go();
    testPerf( instance, N, batchSize, dim );
}
// Regression: configuration reported by jm as crashing — 28x28 filter over
// the full 28x28 input, 20 filters, unbiased, nopad. Args overridable.
TEST( testforward, crash_from_jm ) {
    int instance = 1;
    int batchSize = 64;
    int N = 64;
    LayerDimensions dim;
    dim.setInputPlanes( 32 ).setInputSize( 28 )
       .setNumFilters( 20 ).setFilterSize( 28 )
       .setPadZeros( false ).setBiased( false );
    DimFromArgs::arg( &dim );
    TestArgsParser::arg( "instance", &instance );
    TestArgsParser::arg( "n", &N );
    TestArgsParser::arg( "batchsize", &batchSize );
    TestArgsParser::go();
    testPerf( instance, N, batchSize, dim );
}
// Compare forward implementation 1 against implementations 2..7 on an
// unpadded, biased 8-plane 19x19 layer with 5x5 filters. Instance 5
// (forwardfc) is skipped because it requires inputimagesize == filtersize.
// Fix vs original: removed the unused local `activationName` — this
// compareSpecific overload takes no activation function.
TEST( testforward, compare_1_n_biased_nopad ) {
    LayerDimensions dim;
    int batchSize = 4;
    int N = 4;
    dim.setInputPlanes( 8 ).setInputSize(19).setNumFilters( 8 )
        .setFilterSize( 5 )
        .setPadZeros( false ).setBiased( true );
    for( int instance = 2; instance <= 7; instance++ ) {
        if( instance == 5 ) {
            continue; // forwardfc, cant use for inputimagesize != filtersize
        }
        cout << "instance: " << instance << endl;
        compareSpecific( false, N, batchSize, dim, 1, instance );
    }
}
// Regression: this fc-shaped, unbiased, unpadded configuration broke on
// v5.7.0. Defaults compare instances 1 and 5; overridable via test args.
TEST( testforward, comparespecific_break2 ) {
    LayerDimensions dim;
    int batchSize = 4;
    int instance0 = 1;
    int instance1 = 5;
    int N = 4;
    bool debug = false;
    dim.setInputPlanes( 64 ).setInputSize( 19 )
       .setNumFilters( 64 ).setFilterSize( 19 )
       .setPadZeros( false ).setBiased( false );
    TestArgsParser::arg( "n", &N );
    DimFromArgs::arg( &dim );
    TestArgsParser::arg( "instance0", &instance0 );
    TestArgsParser::arg( "instance1", &instance1 );
    TestArgsParser::arg( "debug", &debug );
    TestArgsParser::arg( "batchsize", &batchSize );
    TestArgsParser::go();
    // Args may have changed the base dims, so recompute the derived ones.
    dim.deriveOthers();
    compareSpecific( debug, N, batchSize, dim, instance0, instance1 );
}
// Command-line-driven comparison of two forward implementations (defaults:
// instance 1 vs 3) on a kgsgo-like padded 64-plane layer; dims, N, batch
// size, instances, and debug flag are all overridable via test args.
TEST( SLOW_testforward, compare_args ) {
    LayerDimensions dim;
    int batchSize = 128;
    int instance0 = 1;
    int instance1 = 3;
    int N = 128;
    bool debug = false;
    dim.setInputPlanes( 64 ).setInputSize( 19 )
       .setNumFilters( 64 ).setFilterSize( 7 )
       .setPadZeros( true ).setBiased( false );
    TestArgsParser::arg( "n", &N );
    DimFromArgs::arg( &dim );
    TestArgsParser::arg( "instance0", &instance0 );
    TestArgsParser::arg( "instance1", &instance1 );
    TestArgsParser::arg( "debug", &debug );
    TestArgsParser::arg( "batchsize", &batchSize );
    TestArgsParser::go();
    // Args may have changed the base dims, so recompute the derived ones.
    dim.deriveOthers();
    compareSpecific( debug, N, batchSize, dim, instance0, instance1 );
}
// Regression: minimal 1-plane, 1x1-filter, 33x33 configuration that broke
// implementation 4 — compared against implementation 0 with N = batchSize = 1.
TEST( testforward, compare_break1_0_4 ) {
    LayerDimensions dim;
    dim.setInputPlanes( 1 ).setInputSize( 33 )
       .setNumFilters( 1 ).setFilterSize( 1 )
       .setPadZeros( false ).setBiased( false );
    compareSpecific( false, 1, 1, dim, 0, 4 );
}