Example #1
0
TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
{
    // On the Inference Engine backend only the Myriad target runs these
    // graphs; every other IE target is skipped.
    const bool skipOnIE = backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD;
    if (skipOnIE)
        throw SkipTestException("");
    runTensorFlowNet("resize_nearest_neighbor");
    runTensorFlowNet("keras_upsampling2d");
}
Example #2
0
TEST_P(Test_TensorFlow_layers, reshape)
{
    // Reshape-related graphs on the parameterized target; the last call
    // passes an extra flag used only by the "flatten" graph.
    const int tid = GetParam();
    runTensorFlowNet("shift_reshape_no_reorder", tid);
    runTensorFlowNet("reshape_reduce", tid);
    runTensorFlowNet("flatten", tid, true);
}
Example #3
0
TEST_P(Test_TensorFlow_layers, padding)
{
    // Every padding variant runs against the same parameterized target.
    const int tid = GetParam();
    const char* nets[] = { "padding_same", "padding_valid", "spatial_padding" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], tid);
}
Example #4
0
TEST_P(Test_TensorFlow_layers, pooling)
{
    // Max-pooling variants plus reduce_mean, which acts as an average
    // pooling over all spatial dimensions.
    const char* nets[] = { "max_pool_even", "max_pool_odd_valid",
                           "max_pool_odd_same", "reduce_mean" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
Example #5
0
TEST_P(Test_TensorFlow_layers, matmul)
{
    // Matrix-multiplication graphs, all on the parameterized target.
    const int tid = GetParam();
    const char* nets[] = { "matmul", "nhwc_reshape_matmul",
                           "nhwc_transpose_reshape_matmul" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], tid);
}
Example #6
0
TEST_P(Test_TensorFlow_layers, matmul)
{
    // Skipped for the OpenCV backend on the FP16 OpenCL target.
    const bool fp16OnOpenCV = backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16;
    if (fp16OnOpenCV)
        throw SkipTestException("");
    const char* nets[] = { "matmul", "nhwc_reshape_matmul",
                           "nhwc_transpose_reshape_matmul" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
Example #7
0
TEST_P(Test_TensorFlow_layers, unfused_flatten)
{
    // Both OpenCL targets (FP32 and FP16) are skipped on the IE backend.
    const bool onOpenCL = target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && onOpenCL)
        throw SkipTestException("");
    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
}
Example #8
0
TEST_P(Test_TensorFlow_layers, lstm)
{
    // Skipped on the Inference Engine backend entirely, and on the
    // OpenCV backend's FP16 OpenCL target.
    const bool onIE = backend == DNN_BACKEND_INFERENCE_ENGINE;
    const bool fp16OnOpenCV = backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16;
    if (onIE || fp16OnOpenCV)
        throw SkipTestException("");
    runTensorFlowNet("lstm", true);
    runTensorFlowNet("lstm", true, 0.0, 0.0, true);  // second run passes additional flags
}
Example #9
0
TEST_P(Test_TensorFlow_layers, conv)
{
    // Convolution variants on the parameterized target.
    const int tid = GetParam();
    const char* nets[] = { "single_conv", "atrous_conv2d_valid",
                           "atrous_conv2d_same", "depthwise_conv2d" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], tid);
}
Example #10
0
TEST_P(Test_TensorFlow_layers, pooling)
{
    // Pooling variants on the parameterized target.
    const int tid = GetParam();
    const char* nets[] = { "max_pool_even", "max_pool_odd_valid",
                           "ave_pool_same", "max_pool_odd_same" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], tid);
}
Example #11
0
TEST_P(Test_TensorFlow_layers, deconvolution)
{
    // Deconvolution variants on the parameterized target.
    const int tid = GetParam();
    const char* nets[] = { "deconvolution", "deconvolution_same",
                           "deconvolution_stride_2_same",
                           "deconvolution_adj_pad_valid",
                           "deconvolution_adj_pad_same" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], tid);
}
Example #12
0
TEST_P(Test_TensorFlow_layers, batch_norm)
{
    // Batch-normalization variants on the parameterized target. The calls
    // stay separate because "batch_norm_text" takes an extra flag.
    const int tid = GetParam();
    runTensorFlowNet("batch_norm", tid);
    runTensorFlowNet("fused_batch_norm", tid);
    runTensorFlowNet("batch_norm_text", tid, true);
    runTensorFlowNet("mvn_batch_norm", tid);
    runTensorFlowNet("mvn_batch_norm_1x1", tid);
}
Example #13
0
TEST_P(Test_TensorFlow_layers, reshape)
{
    // The whole group is skipped on the Inference Engine backend.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    const char* nets[] = { "shift_reshape_no_reorder", "reshape_no_reorder",
                           "reshape_reduce", "reshape_as_shape" };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
Example #14
0
TEST(Test_TensorFlow, memory_read)
{
    // Each network runs on CPU with relaxed tolerances and the trailing
    // argument set to true (presumably the in-memory read mode, matching
    // the test name — verify against runTensorFlowNet's signature).
    const double l1 = 1e-5;
    const double lInf = 1e-4;
    const struct { const char* net; bool flag; } cases[] = {
        { "lstm",             true  },
        { "batch_norm",       false },
        { "fused_batch_norm", false },
        { "batch_norm_text",  true  },
    };
    for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); ++i)
        runTensorFlowNet(cases[i].net, DNN_TARGET_CPU, cases[i].flag, l1, lInf, true);
}
Example #15
0
TEST_P(Test_TensorFlow_layers, slice)
{
    // OpenCL targets (FP32 and FP16) are skipped on the IE backend.
    const bool onOpenCL = target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && onOpenCL)
        throw SkipTestException("");
    runTensorFlowNet("slice_4d");
}
Example #16
0
TEST_P(Test_TensorFlow_layers, flatten)
{
    // On the IE backend, both OpenCL targets and Myriad are skipped.
    const bool skippedTarget = target == DNN_TARGET_OPENCL
                            || target == DNN_TARGET_OPENCL_FP16
                            || target == DNN_TARGET_MYRIAD;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && skippedTarget)
        throw SkipTestException("");
    runTensorFlowNet("flatten", true);
}
Example #17
0
// TODO: fix tests and merge this case into the pooling test
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{
    // Same-padding average pooling. On Inference Engine with the Myriad
    // target this requires OpenVINO 2018R3+, so older releases skip it.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
#endif
    runTensorFlowNet("ave_pool_same");
}
Example #18
0
TEST(Test_TensorFlow, fp16)
{
    // FP16-weight graphs on CPU, all with the same relaxed tolerances.
    const float l1 = 1e-3;
    const float lInf = 1e-2;
    const char* nets[] = {
        "fp16_single_conv",
        "fp16_deconvolution",
        "fp16_max_pool_odd_same",
        "fp16_padding_valid",
        "fp16_eltwise_add_mul",
        "fp16_max_pool_odd_valid",
        "fp16_pad_and_concat",
        "fp16_max_pool_even",
        "fp16_padding_same",
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], DNN_TARGET_CPU, false, l1, lInf);
}
Example #19
0
// TODO: fix pad_and_concat and add this test case to fp16_weights
TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat)
{
    // FP16 pad+concat graph with relaxed tolerances. On Inference Engine
    // with the Myriad target this requires OpenVINO 2018R3+, so older
    // releases skip it.
    const float l1 = 0.00071;
    const float lInf = 0.012;
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
#endif
    runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
}
Example #20
0
TEST_P(Test_TensorFlow_layers, fp16_weights)
{
    // FP16-weight graphs, all sharing the same relaxed tolerances.
    const float l1 = 0.00071;
    const float lInf = 0.012;
    const char* nets[] = {
        "fp16_single_conv",
        "fp16_deconvolution",
        "fp16_max_pool_odd_same",
        "fp16_padding_valid",
        "fp16_eltwise_add_mul",
        "fp16_max_pool_odd_valid",
        "fp16_max_pool_even",
        "fp16_padding_same",
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i], false, l1, lInf);
}
Example #21
0
TEST_P(Test_TensorFlow_layers, deconvolution)
{
    // Deconvolution variants, including the Keras-exported graphs.
    const char* nets[] = {
        "deconvolution",
        "deconvolution_same",
        "deconvolution_stride_2_same",
        "deconvolution_adj_pad_valid",
        "deconvolution_adj_pad_same",
        "keras_deconv_valid",
        "keras_deconv_same",
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
Example #22
0
TEST_P(Test_TensorFlow_layers, conv)
{
    // Convolution variants, including Keras atrous and NCHW conv+pool graphs.
    const char* nets[] = {
        "single_conv",
        "atrous_conv2d_valid",
        "atrous_conv2d_same",
        "depthwise_conv2d",
        "keras_atrous_conv2d_same",
        "conv_pool_nchw",
    };
    for (size_t i = 0; i < sizeof(nets) / sizeof(nets[0]); ++i)
        runTensorFlowNet(nets[i]);
}
Example #23
0
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
{
    // Elementwise add/mul graph on the parameterized target.
    const int tid = GetParam();
    runTensorFlowNet("eltwise_add_mul", tid);
}
Example #24
0
// Import check for a nearest-neighbor resize graph.
TEST(Test_TensorFlow, resize_nearest_neighbor)
{
    runTensorFlowNet("resize_nearest_neighbor");
}
Example #25
0
// Import check for a 4D slice graph.
TEST(Test_TensorFlow, slice)
{
    runTensorFlowNet("slice_4d");
}
Example #26
0
// Import check for a split-into-equal-parts graph.
TEST(Test_TensorFlow, split)
{
    runTensorFlowNet("split_equals");
}
Example #27
0
// Import check for an LSTM graph on CPU; the trailing flag mirrors the
// other lstm tests — verify its meaning against runTensorFlowNet.
TEST(Test_TensorFlow, lstm)
{
    runTensorFlowNet("lstm", DNN_TARGET_CPU, true);
}
Example #28
0
// Import check for a uint8-quantized convolution graph.
TEST(Test_TensorFlow, quantized)
{
    runTensorFlowNet("uint8_single_conv");
}
Example #29
0
TEST_P(Test_TensorFlow_layers, pad_and_concat)
{
    // Pad followed by concat, on the parameterized target.
    const int tid = GetParam();
    runTensorFlowNet("pad_and_concat", tid);
}
Example #30
0
// Import check for a graph using TensorFlow Defun (function) nodes.
TEST(Test_TensorFlow, defun)
{
    runTensorFlowNet("defun_dropout");
}