TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
{
    // Skipped on the Inference Engine backend for every target except MYRIAD.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
        throw SkipTestException("");

    runTensorFlowNet("resize_nearest_neighbor");
    runTensorFlowNet("keras_upsampling2d");
}
// NOTE(review): renamed from "reshape" — another TEST_P(Test_TensorFlow_layers, reshape)
// exists later in this file; duplicate gtest names expand to the same test class twice
// and fail to compile. This variant uses the older single-int GetParam() convention.
TEST_P(Test_TensorFlow_layers, reshape_legacy)
{
    int targetId = GetParam();
    runTensorFlowNet("shift_reshape_no_reorder", targetId);
    runTensorFlowNet("reshape_reduce", targetId);
    runTensorFlowNet("flatten", targetId, true);
}
TEST_P(Test_TensorFlow_layers, padding)
{
    // Exercise SAME/VALID convolution padding plus explicit spatial padding.
    const int tid = GetParam();
    runTensorFlowNet("padding_same", tid);
    runTensorFlowNet("padding_valid", tid);
    runTensorFlowNet("spatial_padding", tid);
}
TEST_P(Test_TensorFlow_layers, pooling)
{
    runTensorFlowNet("max_pool_even");
    runTensorFlowNet("max_pool_odd_valid");
    runTensorFlowNet("max_pool_odd_same");
    // reduce_mean: an average pooling over all spatial dimensions.
    // (The comment was previously on the same line as the closing brace and
    // commented it out, leaving the function body unterminated.)
    runTensorFlowNet("reduce_mean");
}
// NOTE(review): renamed from "matmul" — the fixture-style
// TEST_P(Test_TensorFlow_layers, matmul) on the next line has the same name;
// duplicate gtest names define the same test class twice and do not compile.
TEST_P(Test_TensorFlow_layers, matmul_legacy)
{
    int targetId = GetParam();
    runTensorFlowNet("matmul", targetId);
    runTensorFlowNet("nhwc_reshape_matmul", targetId);
    runTensorFlowNet("nhwc_transpose_reshape_matmul", targetId);
}
TEST_P(Test_TensorFlow_layers, matmul)
{
    // Skipped for the OpenCV backend when targeting OpenCL FP16.
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        throw SkipTestException("");

    runTensorFlowNet("matmul");
    runTensorFlowNet("nhwc_reshape_matmul");
    runTensorFlowNet("nhwc_transpose_reshape_matmul");
}
TEST_P(Test_TensorFlow_layers, unfused_flatten)
{
    // Skipped on the Inference Engine backend for both OpenCL targets.
    const bool isOCL = (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && isOCL)
        throw SkipTestException("");

    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
}
TEST_P(Test_TensorFlow_layers, lstm)
{
    // Not supported on the Inference Engine backend at all,
    // nor on the OpenCV backend with the OpenCL FP16 target.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        throw SkipTestException("");

    runTensorFlowNet("lstm", true);
    runTensorFlowNet("lstm", true, 0.0, 0.0, true);
}
// NOTE(review): renamed from "conv" — a fixture-style
// TEST_P(Test_TensorFlow_layers, conv) exists later in this file; duplicate
// gtest names define the same test class twice and fail to compile.
TEST_P(Test_TensorFlow_layers, conv_legacy)
{
    int targetId = GetParam();
    runTensorFlowNet("single_conv", targetId);
    runTensorFlowNet("atrous_conv2d_valid", targetId);
    runTensorFlowNet("atrous_conv2d_same", targetId);
    runTensorFlowNet("depthwise_conv2d", targetId);
}
// NOTE(review): renamed from "pooling" — a fixture-style
// TEST_P(Test_TensorFlow_layers, pooling) exists earlier in this file;
// duplicate gtest names define the same test class twice and fail to compile.
TEST_P(Test_TensorFlow_layers, pooling_legacy)
{
    int targetId = GetParam();
    runTensorFlowNet("max_pool_even", targetId);
    runTensorFlowNet("max_pool_odd_valid", targetId);
    runTensorFlowNet("ave_pool_same", targetId);
    runTensorFlowNet("max_pool_odd_same", targetId);
}
// NOTE(review): renamed from "deconvolution" — a fixture-style
// TEST_P(Test_TensorFlow_layers, deconvolution) exists later in this file;
// duplicate gtest names define the same test class twice and fail to compile.
TEST_P(Test_TensorFlow_layers, deconvolution_legacy)
{
    int targetId = GetParam();
    runTensorFlowNet("deconvolution", targetId);
    runTensorFlowNet("deconvolution_same", targetId);
    runTensorFlowNet("deconvolution_stride_2_same", targetId);
    runTensorFlowNet("deconvolution_adj_pad_valid", targetId);
    runTensorFlowNet("deconvolution_adj_pad_same", targetId);
}
TEST_P(Test_TensorFlow_layers, batch_norm)
{
    const int tid = GetParam();
    runTensorFlowNet("batch_norm", tid);
    runTensorFlowNet("fused_batch_norm", tid);
    // Extra flag set only for the "_text" variant — presumably selects the
    // text-format graph; confirm against runTensorFlowNet's signature.
    runTensorFlowNet("batch_norm_text", tid, true);
    runTensorFlowNet("mvn_batch_norm", tid);
    runTensorFlowNet("mvn_batch_norm_1x1", tid);
}
TEST_P(Test_TensorFlow_layers, reshape)
{
    // Skipped entirely on the Inference Engine backend.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");

    runTensorFlowNet("shift_reshape_no_reorder");
    runTensorFlowNet("reshape_no_reorder");
    runTensorFlowNet("reshape_reduce");
    runTensorFlowNet("reshape_as_shape");
}
TEST(Test_TensorFlow, memory_read)
{
    // All nets run on CPU with the same tolerances and the trailing flag set.
    const double l1 = 1e-5;
    const double lInf = 1e-4;
    runTensorFlowNet("lstm", DNN_TARGET_CPU, true, l1, lInf, true);
    runTensorFlowNet("batch_norm", DNN_TARGET_CPU, false, l1, lInf, true);
    runTensorFlowNet("fused_batch_norm", DNN_TARGET_CPU, false, l1, lInf, true);
    runTensorFlowNet("batch_norm_text", DNN_TARGET_CPU, true, l1, lInf, true);
}
TEST_P(Test_TensorFlow_layers, slice)
{
    // Skipped on the Inference Engine backend for both OpenCL targets.
    const bool isOCL = (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && isOCL)
        throw SkipTestException("");

    runTensorFlowNet("slice_4d");
}
TEST_P(Test_TensorFlow_layers, flatten)
{
    // Skipped on the Inference Engine backend for OpenCL, OpenCL FP16 and MYRIAD.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE
        && (target == DNN_TARGET_OPENCL
            || target == DNN_TARGET_OPENCL_FP16
            || target == DNN_TARGET_MYRIAD))
    {
        throw SkipTestException("");
    }
    runTensorFlowNet("flatten", true);
}
// TODO: fix tests and replace to pooling TEST_P(Test_TensorFlow_layers, ave_pool_same) { #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) throw SkipTestException("Test is enabled starts from OpenVINO 2018R3"); #endif runTensorFlowNet("ave_pool_same"); }
TEST(Test_TensorFlow, fp16)
{
    // Every fp16 net runs on CPU with identical arguments, so iterate a list
    // (same order as the original call sequence).
    const float l1 = 1e-3;
    const float lInf = 1e-2;
    const char* const nets[] = {
        "fp16_single_conv", "fp16_deconvolution", "fp16_max_pool_odd_same",
        "fp16_padding_valid", "fp16_eltwise_add_mul", "fp16_max_pool_odd_valid",
        "fp16_pad_and_concat", "fp16_max_pool_even", "fp16_padding_same"
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], DNN_TARGET_CPU, false, l1, lInf);
}
// TODO: fix pad_and_concat and add this test case to fp16_weights TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat) { const float l1 = 0.00071; const float lInf = 0.012; #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) throw SkipTestException("Test is enabled starts from OpenVINO 2018R3"); #endif runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf); }
TEST_P(Test_TensorFlow_layers, fp16_weights)
{
    // All fp16-weight nets share the same tolerances; run them in sequence
    // (same order as the original call list).
    const float l1 = 0.00071;
    const float lInf = 0.012;
    const char* const nets[] = {
        "fp16_single_conv", "fp16_deconvolution", "fp16_max_pool_odd_same",
        "fp16_padding_valid", "fp16_eltwise_add_mul", "fp16_max_pool_odd_valid",
        "fp16_max_pool_even", "fp16_padding_same"
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], false, l1, lInf);
}
TEST_P(Test_TensorFlow_layers, deconvolution)
{
    // Deconvolution variants, all run with default arguments (original order).
    const char* const nets[] = {
        "deconvolution", "deconvolution_same", "deconvolution_stride_2_same",
        "deconvolution_adj_pad_valid", "deconvolution_adj_pad_same",
        "keras_deconv_valid", "keras_deconv_same"
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
TEST_P(Test_TensorFlow_layers, conv)
{
    // Convolution variants, all run with default arguments (original order).
    const char* const nets[] = {
        "single_conv", "atrous_conv2d_valid", "atrous_conv2d_same",
        "depthwise_conv2d", "keras_atrous_conv2d_same", "conv_pool_nchw"
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
{
    const int tid = GetParam();
    runTensorFlowNet("eltwise_add_mul", tid);
}
TEST(Test_TensorFlow, resize_nearest_neighbor)
{
    runTensorFlowNet("resize_nearest_neighbor");
}
TEST(Test_TensorFlow, slice)
{
    runTensorFlowNet("slice_4d");
}
TEST(Test_TensorFlow, split)
{
    runTensorFlowNet("split_equals");
}
TEST(Test_TensorFlow, lstm)
{
    runTensorFlowNet("lstm", DNN_TARGET_CPU, true);
}
TEST(Test_TensorFlow, quantized)
{
    runTensorFlowNet("uint8_single_conv");
}
TEST_P(Test_TensorFlow_layers, pad_and_concat)
{
    const int tid = GetParam();
    runTensorFlowNet("pad_and_concat", tid);
}
TEST(Test_TensorFlow, defun)
{
    runTensorFlowNet("defun_dropout");
}