// clang-format off
// DO NOT EDIT;
// Generated by ml/nn/runtime/test/specs/generate_vts_test.sh
| // Generated from: argmax_1_float.mod.py. |
| namespace argmax_1_float { |
| // Generated argmax_1_float test |
| #include "examples/argmax_1_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_1_float.model.cpp" |
| } // namespace argmax_1_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_1_float) { |
| generated_tests::Execute(device, |
| argmax_1_float::createTestModel, |
| argmax_1_float::is_ignored, |
| argmax_1_float::examples); |
| } |
| |
| // Generated from: argmax_1_float_relaxed.mod.py. |
| namespace argmax_1_float_relaxed { |
| // Generated argmax_1_float_relaxed test |
| #include "examples/argmax_1_float_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_1_float_relaxed.model.cpp" |
| } // namespace argmax_1_float_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_1_float_relaxed) { |
| generated_tests::Execute(device, |
| argmax_1_float_relaxed::createTestModel, |
| argmax_1_float_relaxed::is_ignored, |
| argmax_1_float_relaxed::examples); |
| } |
| |
| // Generated from: argmax_1_int32.mod.py. |
| namespace argmax_1_int32 { |
| // Generated argmax_1_int32 test |
| #include "examples/argmax_1_int32.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_1_int32.model.cpp" |
| } // namespace argmax_1_int32 |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_1_int32) { |
| generated_tests::Execute(device, |
| argmax_1_int32::createTestModel, |
| argmax_1_int32::is_ignored, |
| argmax_1_int32::examples); |
| } |
| |
| // Generated from: argmax_1_quant8.mod.py. |
| namespace argmax_1_quant8 { |
| // Generated argmax_1_quant8 test |
| #include "examples/argmax_1_quant8.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_1_quant8.model.cpp" |
| } // namespace argmax_1_quant8 |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_1_quant8) { |
| generated_tests::Execute(device, |
| argmax_1_quant8::createTestModel, |
| argmax_1_quant8::is_ignored, |
| argmax_1_quant8::examples); |
| } |
| |
| // Generated from: argmax_2_float.mod.py. |
| namespace argmax_2_float { |
| // Generated argmax_2_float test |
| #include "examples/argmax_2_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_2_float.model.cpp" |
| } // namespace argmax_2_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_2_float) { |
| generated_tests::Execute(device, |
| argmax_2_float::createTestModel, |
| argmax_2_float::is_ignored, |
| argmax_2_float::examples); |
| } |
| |
| // Generated from: argmax_2_float_relaxed.mod.py. |
| namespace argmax_2_float_relaxed { |
| // Generated argmax_2_float_relaxed test |
| #include "examples/argmax_2_float_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_2_float_relaxed.model.cpp" |
| } // namespace argmax_2_float_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_2_float_relaxed) { |
| generated_tests::Execute(device, |
| argmax_2_float_relaxed::createTestModel, |
| argmax_2_float_relaxed::is_ignored, |
| argmax_2_float_relaxed::examples); |
| } |
| |
| // Generated from: argmax_2_int32.mod.py. |
| namespace argmax_2_int32 { |
| // Generated argmax_2_int32 test |
| #include "examples/argmax_2_int32.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_2_int32.model.cpp" |
| } // namespace argmax_2_int32 |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_2_int32) { |
| generated_tests::Execute(device, |
| argmax_2_int32::createTestModel, |
| argmax_2_int32::is_ignored, |
| argmax_2_int32::examples); |
| } |
| |
| // Generated from: argmax_2_quant8.mod.py. |
| namespace argmax_2_quant8 { |
| // Generated argmax_2_quant8 test |
| #include "examples/argmax_2_quant8.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_2_quant8.model.cpp" |
| } // namespace argmax_2_quant8 |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_2_quant8) { |
| generated_tests::Execute(device, |
| argmax_2_quant8::createTestModel, |
| argmax_2_quant8::is_ignored, |
| argmax_2_quant8::examples); |
| } |
| |
| // Generated from: argmax_3_float.mod.py. |
| namespace argmax_3_float { |
| // Generated argmax_3_float test |
| #include "examples/argmax_3_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmax_3_float.model.cpp" |
| } // namespace argmax_3_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmax_3_float) { |
| generated_tests::Execute(device, |
| argmax_3_float::createTestModel, |
| argmax_3_float::is_ignored, |
| argmax_3_float::examples); |
| } |
| |
| // Generated from: argmin_1_float.mod.py. |
| namespace argmin_1_float { |
| // Generated argmin_1_float test |
| #include "examples/argmin_1_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_1_float.model.cpp" |
| } // namespace argmin_1_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_1_float) { |
| generated_tests::Execute(device, |
| argmin_1_float::createTestModel, |
| argmin_1_float::is_ignored, |
| argmin_1_float::examples); |
| } |
| |
| // Generated from: argmin_1_float_relaxed.mod.py. |
| namespace argmin_1_float_relaxed { |
| // Generated argmin_1_float_relaxed test |
| #include "examples/argmin_1_float_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_1_float_relaxed.model.cpp" |
| } // namespace argmin_1_float_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_1_float_relaxed) { |
| generated_tests::Execute(device, |
| argmin_1_float_relaxed::createTestModel, |
| argmin_1_float_relaxed::is_ignored, |
| argmin_1_float_relaxed::examples); |
| } |
| |
| // Generated from: argmin_1_int32.mod.py. |
| namespace argmin_1_int32 { |
| // Generated argmin_1_int32 test |
| #include "examples/argmin_1_int32.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_1_int32.model.cpp" |
| } // namespace argmin_1_int32 |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_1_int32) { |
| generated_tests::Execute(device, |
| argmin_1_int32::createTestModel, |
| argmin_1_int32::is_ignored, |
| argmin_1_int32::examples); |
| } |
| |
| // Generated from: argmin_1_quant8.mod.py. |
| namespace argmin_1_quant8 { |
| // Generated argmin_1_quant8 test |
| #include "examples/argmin_1_quant8.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_1_quant8.model.cpp" |
| } // namespace argmin_1_quant8 |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_1_quant8) { |
| generated_tests::Execute(device, |
| argmin_1_quant8::createTestModel, |
| argmin_1_quant8::is_ignored, |
| argmin_1_quant8::examples); |
| } |
| |
| // Generated from: argmin_2_float.mod.py. |
| namespace argmin_2_float { |
| // Generated argmin_2_float test |
| #include "examples/argmin_2_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_2_float.model.cpp" |
| } // namespace argmin_2_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_2_float) { |
| generated_tests::Execute(device, |
| argmin_2_float::createTestModel, |
| argmin_2_float::is_ignored, |
| argmin_2_float::examples); |
| } |
| |
| // Generated from: argmin_2_float_relaxed.mod.py. |
| namespace argmin_2_float_relaxed { |
| // Generated argmin_2_float_relaxed test |
| #include "examples/argmin_2_float_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_2_float_relaxed.model.cpp" |
| } // namespace argmin_2_float_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_2_float_relaxed) { |
| generated_tests::Execute(device, |
| argmin_2_float_relaxed::createTestModel, |
| argmin_2_float_relaxed::is_ignored, |
| argmin_2_float_relaxed::examples); |
| } |
| |
| // Generated from: argmin_2_int32.mod.py. |
| namespace argmin_2_int32 { |
| // Generated argmin_2_int32 test |
| #include "examples/argmin_2_int32.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_2_int32.model.cpp" |
| } // namespace argmin_2_int32 |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_2_int32) { |
| generated_tests::Execute(device, |
| argmin_2_int32::createTestModel, |
| argmin_2_int32::is_ignored, |
| argmin_2_int32::examples); |
| } |
| |
| // Generated from: argmin_2_quant8.mod.py. |
| namespace argmin_2_quant8 { |
| // Generated argmin_2_quant8 test |
| #include "examples/argmin_2_quant8.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_2_quant8.model.cpp" |
| } // namespace argmin_2_quant8 |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_2_quant8) { |
| generated_tests::Execute(device, |
| argmin_2_quant8::createTestModel, |
| argmin_2_quant8::is_ignored, |
| argmin_2_quant8::examples); |
| } |
| |
| // Generated from: argmin_3_float.mod.py. |
| namespace argmin_3_float { |
| // Generated argmin_3_float test |
| #include "examples/argmin_3_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/argmin_3_float.model.cpp" |
| } // namespace argmin_3_float |
| |
| TEST_F(NeuralnetworksHidlTest, argmin_3_float) { |
| generated_tests::Execute(device, |
| argmin_3_float::createTestModel, |
| argmin_3_float::is_ignored, |
| argmin_3_float::examples); |
| } |
| |
| // Generated from: channel_shuffle.mod.py. |
| namespace channel_shuffle { |
| // Generated channel_shuffle test |
| #include "examples/channel_shuffle.example.cpp" |
| // Generated model constructor |
| #include "vts_models/channel_shuffle.model.cpp" |
| } // namespace channel_shuffle |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel, |
| channel_shuffle::is_ignored, |
| channel_shuffle::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle_relaxed) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel_relaxed, |
| channel_shuffle::is_ignored_relaxed, |
| channel_shuffle::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle_quant8) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel_quant8, |
| channel_shuffle::is_ignored_quant8, |
| channel_shuffle::examples_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle_2) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel_2, |
| channel_shuffle::is_ignored_2, |
| channel_shuffle::examples_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle_relaxed_2) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel_relaxed_2, |
| channel_shuffle::is_ignored_relaxed_2, |
| channel_shuffle::examples_relaxed_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, channel_shuffle_quant8_2) { |
| generated_tests::Execute(device, |
| channel_shuffle::createTestModel_quant8_2, |
| channel_shuffle::is_ignored_quant8_2, |
| channel_shuffle::examples_quant8_2); |
| } |
| |
| // Generated from: expand_dims.mod.py. |
| namespace expand_dims { |
| // Generated expand_dims test |
| #include "examples/expand_dims.example.cpp" |
| // Generated model constructor |
| #include "vts_models/expand_dims.model.cpp" |
| } // namespace expand_dims |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel, |
| expand_dims::is_ignored, |
| expand_dims::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_relaxed) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_relaxed, |
| expand_dims::is_ignored_relaxed, |
| expand_dims::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_quant8) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_quant8, |
| expand_dims::is_ignored_quant8, |
| expand_dims::examples_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_int32) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_int32, |
| expand_dims::is_ignored_int32, |
| expand_dims::examples_int32); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_2) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_2, |
| expand_dims::is_ignored_2, |
| expand_dims::examples_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_relaxed_2) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_relaxed_2, |
| expand_dims::is_ignored_relaxed_2, |
| expand_dims::examples_relaxed_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_quant8_2) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_quant8_2, |
| expand_dims::is_ignored_quant8_2, |
| expand_dims::examples_quant8_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_int32_2) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_int32_2, |
| expand_dims::is_ignored_int32_2, |
| expand_dims::examples_int32_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_3) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_3, |
| expand_dims::is_ignored_3, |
| expand_dims::examples_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_relaxed_3) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_relaxed_3, |
| expand_dims::is_ignored_relaxed_3, |
| expand_dims::examples_relaxed_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_quant8_3) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_quant8_3, |
| expand_dims::is_ignored_quant8_3, |
| expand_dims::examples_quant8_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_int32_3) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_int32_3, |
| expand_dims::is_ignored_int32_3, |
| expand_dims::examples_int32_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_4) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_4, |
| expand_dims::is_ignored_4, |
| expand_dims::examples_4); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_relaxed_4) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_relaxed_4, |
| expand_dims::is_ignored_relaxed_4, |
| expand_dims::examples_relaxed_4); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_quant8_4) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_quant8_4, |
| expand_dims::is_ignored_quant8_4, |
| expand_dims::examples_quant8_4); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, expand_dims_int32_4) { |
| generated_tests::Execute(device, |
| expand_dims::createTestModel_int32_4, |
| expand_dims::is_ignored_int32_4, |
| expand_dims::examples_int32_4); |
| } |
| |
| // Generated from: grouped_conv2d.mod.py. |
| namespace grouped_conv2d { |
| // Generated grouped_conv2d test |
| #include "examples/grouped_conv2d.example.cpp" |
| // Generated model constructor |
| #include "vts_models/grouped_conv2d.model.cpp" |
| } // namespace grouped_conv2d |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel, |
| grouped_conv2d::is_ignored, |
| grouped_conv2d::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_weight_as_input, |
| grouped_conv2d::is_ignored_weight_as_input, |
| grouped_conv2d::examples_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_relaxed) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_relaxed, |
| grouped_conv2d::is_ignored_relaxed, |
| grouped_conv2d::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_relaxed_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_relaxed_weight_as_input, |
| grouped_conv2d::is_ignored_relaxed_weight_as_input, |
| grouped_conv2d::examples_relaxed_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_quant8) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_quant8, |
| grouped_conv2d::is_ignored_quant8, |
| grouped_conv2d::examples_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_quant8_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_quant8_weight_as_input, |
| grouped_conv2d::is_ignored_quant8_weight_as_input, |
| grouped_conv2d::examples_quant8_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large, |
| grouped_conv2d::is_ignored_large, |
| grouped_conv2d::examples_large); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large_weight_as_input, |
| grouped_conv2d::is_ignored_large_weight_as_input, |
| grouped_conv2d::examples_large_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large_relaxed) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large_relaxed, |
| grouped_conv2d::is_ignored_large_relaxed, |
| grouped_conv2d::examples_large_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large_relaxed_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large_relaxed_weight_as_input, |
| grouped_conv2d::is_ignored_large_relaxed_weight_as_input, |
| grouped_conv2d::examples_large_relaxed_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large_quant8) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large_quant8, |
| grouped_conv2d::is_ignored_large_quant8, |
| grouped_conv2d::examples_large_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_large_quant8_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_large_quant8_weight_as_input, |
| grouped_conv2d::is_ignored_large_quant8_weight_as_input, |
| grouped_conv2d::examples_large_quant8_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel, |
| grouped_conv2d::is_ignored_channel, |
| grouped_conv2d::examples_channel); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel_weight_as_input, |
| grouped_conv2d::is_ignored_channel_weight_as_input, |
| grouped_conv2d::examples_channel_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel_relaxed) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel_relaxed, |
| grouped_conv2d::is_ignored_channel_relaxed, |
| grouped_conv2d::examples_channel_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel_relaxed_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel_relaxed_weight_as_input, |
| grouped_conv2d::is_ignored_channel_relaxed_weight_as_input, |
| grouped_conv2d::examples_channel_relaxed_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel_quant8) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel_quant8, |
| grouped_conv2d::is_ignored_channel_quant8, |
| grouped_conv2d::examples_channel_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, grouped_conv2d_channel_quant8_weight_as_input) { |
| generated_tests::Execute(device, |
| grouped_conv2d::createTestModel_channel_quant8_weight_as_input, |
| grouped_conv2d::is_ignored_channel_quant8_weight_as_input, |
| grouped_conv2d::examples_channel_quant8_weight_as_input); |
| } |
| |
| // Generated from: heatmap_max_keypoint.mod.py. |
| namespace heatmap_max_keypoint { |
| // Generated heatmap_max_keypoint test |
| #include "examples/heatmap_max_keypoint.example.cpp" |
| // Generated model constructor |
| #include "vts_models/heatmap_max_keypoint.model.cpp" |
| } // namespace heatmap_max_keypoint |
| |
| TEST_F(NeuralnetworksHidlTest, heatmap_max_keypoint) { |
| generated_tests::Execute(device, |
| heatmap_max_keypoint::createTestModel, |
| heatmap_max_keypoint::is_ignored, |
| heatmap_max_keypoint::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, heatmap_max_keypoint_relaxed) { |
| generated_tests::Execute(device, |
| heatmap_max_keypoint::createTestModel_relaxed, |
| heatmap_max_keypoint::is_ignored_relaxed, |
| heatmap_max_keypoint::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, heatmap_max_keypoint_2) { |
| generated_tests::Execute(device, |
| heatmap_max_keypoint::createTestModel_2, |
| heatmap_max_keypoint::is_ignored_2, |
| heatmap_max_keypoint::examples_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, heatmap_max_keypoint_relaxed_2) { |
| generated_tests::Execute(device, |
| heatmap_max_keypoint::createTestModel_relaxed_2, |
| heatmap_max_keypoint::is_ignored_relaxed_2, |
| heatmap_max_keypoint::examples_relaxed_2); |
| } |
| |
| // Generated from: lsh_projection_3_relaxed.mod.py. |
| namespace lsh_projection_3_relaxed { |
| // Generated lsh_projection_3_relaxed test |
| #include "examples/lsh_projection_3_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/lsh_projection_3_relaxed.model.cpp" |
| } // namespace lsh_projection_3_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, lsh_projection_3_relaxed) { |
| generated_tests::Execute(device, |
| lsh_projection_3_relaxed::createTestModel, |
| lsh_projection_3_relaxed::is_ignored, |
| lsh_projection_3_relaxed::examples); |
| } |
| |
| // Generated from: lsh_projection_4_relaxed.mod.py. |
| namespace lsh_projection_4_relaxed { |
| // Generated lsh_projection_4_relaxed test |
| #include "examples/lsh_projection_4_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/lsh_projection_4_relaxed.model.cpp" |
| } // namespace lsh_projection_4_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, lsh_projection_4_relaxed) { |
| generated_tests::Execute(device, |
| lsh_projection_4_relaxed::createTestModel, |
| lsh_projection_4_relaxed::is_ignored, |
| lsh_projection_4_relaxed::examples); |
| } |
| |
| // Generated from: lsh_projection_deprecated.mod.py. |
| namespace lsh_projection_deprecated { |
| // Generated lsh_projection_deprecated test |
| #include "examples/lsh_projection_deprecated.example.cpp" |
| // Generated model constructor |
| #include "vts_models/lsh_projection_deprecated.model.cpp" |
| } // namespace lsh_projection_deprecated |
| |
| TEST_F(NeuralnetworksHidlTest, lsh_projection_deprecated) { |
| generated_tests::Execute(device, |
| lsh_projection_deprecated::createTestModel, |
| lsh_projection_deprecated::is_ignored, |
| lsh_projection_deprecated::examples); |
| } |
| |
| // Generated from: pad_v2_1_float.mod.py. |
| namespace pad_v2_1_float { |
| // Generated pad_v2_1_float test |
| #include "examples/pad_v2_1_float.example.cpp" |
| // Generated model constructor |
| #include "vts_models/pad_v2_1_float.model.cpp" |
| } // namespace pad_v2_1_float |
| |
| TEST_F(NeuralnetworksHidlTest, pad_v2_1_float) { |
| generated_tests::Execute(device, |
| pad_v2_1_float::createTestModel, |
| pad_v2_1_float::is_ignored, |
| pad_v2_1_float::examples); |
| } |
| |
| // Generated from: pad_v2_1_float_relaxed.mod.py. |
| namespace pad_v2_1_float_relaxed { |
| // Generated pad_v2_1_float_relaxed test |
| #include "examples/pad_v2_1_float_relaxed.example.cpp" |
| // Generated model constructor |
| #include "vts_models/pad_v2_1_float_relaxed.model.cpp" |
| } // namespace pad_v2_1_float_relaxed |
| |
| TEST_F(NeuralnetworksHidlTest, pad_v2_1_float_relaxed) { |
| generated_tests::Execute(device, |
| pad_v2_1_float_relaxed::createTestModel, |
| pad_v2_1_float_relaxed::is_ignored, |
| pad_v2_1_float_relaxed::examples); |
| } |
| |
| // Generated from: pad_v2_1_quant8.mod.py. |
| namespace pad_v2_1_quant8 { |
| // Generated pad_v2_1_quant8 test |
| #include "examples/pad_v2_1_quant8.example.cpp" |
| // Generated model constructor |
| #include "vts_models/pad_v2_1_quant8.model.cpp" |
| } // namespace pad_v2_1_quant8 |
| |
| TEST_F(NeuralnetworksHidlTest, pad_v2_1_quant8) { |
| generated_tests::Execute(device, |
| pad_v2_1_quant8::createTestModel, |
| pad_v2_1_quant8::is_ignored, |
| pad_v2_1_quant8::examples); |
| } |
| |
| // Generated from: prelu.mod.py. |
| namespace prelu { |
| // Generated prelu test |
| #include "examples/prelu.example.cpp" |
| // Generated model constructor |
| #include "vts_models/prelu.model.cpp" |
| } // namespace prelu |
| |
| TEST_F(NeuralnetworksHidlTest, prelu) { |
| generated_tests::Execute(device, |
| prelu::createTestModel, |
| prelu::is_ignored, |
| prelu::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, prelu_relaxed) { |
| generated_tests::Execute(device, |
| prelu::createTestModel_relaxed, |
| prelu::is_ignored_relaxed, |
| prelu::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, prelu_quant8) { |
| generated_tests::Execute(device, |
| prelu::createTestModel_quant8, |
| prelu::is_ignored_quant8, |
| prelu::examples_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, prelu_weight_as_input) { |
| generated_tests::Execute(device, |
| prelu::createTestModel_weight_as_input, |
| prelu::is_ignored_weight_as_input, |
| prelu::examples_weight_as_input); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, prelu_weight_as_input_relaxed) { |
| generated_tests::Execute(device, |
| prelu::createTestModel_weight_as_input_relaxed, |
| prelu::is_ignored_weight_as_input_relaxed, |
| prelu::examples_weight_as_input_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, prelu_weight_as_input_quant8) { |
| generated_tests::Execute(device, |
| prelu::createTestModel_weight_as_input_quant8, |
| prelu::is_ignored_weight_as_input_quant8, |
| prelu::examples_weight_as_input_quant8); |
| } |
| |
| // Generated from: quantize.mod.py. |
| namespace quantize { |
| // Generated quantize test |
| #include "examples/quantize.example.cpp" |
| // Generated model constructor |
| #include "vts_models/quantize.model.cpp" |
| } // namespace quantize |
| |
| TEST_F(NeuralnetworksHidlTest, quantize_quant8) { |
| generated_tests::Execute(device, |
| quantize::createTestModel_quant8, |
| quantize::is_ignored_quant8, |
| quantize::examples_quant8); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, quantize_quant8_2) { |
| generated_tests::Execute(device, |
| quantize::createTestModel_quant8_2, |
| quantize::is_ignored_quant8_2, |
| quantize::examples_quant8_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, quantize_quant8_3) { |
| generated_tests::Execute(device, |
| quantize::createTestModel_quant8_3, |
| quantize::is_ignored_quant8_3, |
| quantize::examples_quant8_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, quantize_quant8_4) { |
| generated_tests::Execute(device, |
| quantize::createTestModel_quant8_4, |
| quantize::is_ignored_quant8_4, |
| quantize::examples_quant8_4); |
| } |
| |
| // Generated from: roi_align.mod.py. |
| namespace roi_align { |
| // Generated roi_align test |
| #include "examples/roi_align.example.cpp" |
| // Generated model constructor |
| #include "vts_models/roi_align.model.cpp" |
| } // namespace roi_align |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel, |
| roi_align::is_ignored, |
| roi_align::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align_relaxed) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel_relaxed, |
| roi_align::is_ignored_relaxed, |
| roi_align::examples_relaxed); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align_2) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel_2, |
| roi_align::is_ignored_2, |
| roi_align::examples_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align_relaxed_2) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel_relaxed_2, |
| roi_align::is_ignored_relaxed_2, |
| roi_align::examples_relaxed_2); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align_3) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel_3, |
| roi_align::is_ignored_3, |
| roi_align::examples_3); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, roi_align_relaxed_3) { |
| generated_tests::Execute(device, |
| roi_align::createTestModel_relaxed_3, |
| roi_align::is_ignored_relaxed_3, |
| roi_align::examples_relaxed_3); |
| } |
| |
| // Generated from: split_float_1.mod.py. |
| namespace split_float_1 { |
| // Generated split_float_1 test |
| #include "examples/split_float_1.example.cpp" |
| // Generated model constructor |
| #include "vts_models/split_float_1.model.cpp" |
| } // namespace split_float_1 |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_1) { |
| generated_tests::Execute(device, |
| split_float_1::createTestModel, |
| split_float_1::is_ignored, |
| split_float_1::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_1_relaxed) { |
| generated_tests::Execute(device, |
| split_float_1::createTestModel_relaxed, |
| split_float_1::is_ignored_relaxed, |
| split_float_1::examples_relaxed); |
| } |
| |
| // Generated from: split_float_2.mod.py. |
| namespace split_float_2 { |
| // Generated split_float_2 test |
| #include "examples/split_float_2.example.cpp" |
| // Generated model constructor |
| #include "vts_models/split_float_2.model.cpp" |
| } // namespace split_float_2 |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_2) { |
| generated_tests::Execute(device, |
| split_float_2::createTestModel, |
| split_float_2::is_ignored, |
| split_float_2::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_2_relaxed) { |
| generated_tests::Execute(device, |
| split_float_2::createTestModel_relaxed, |
| split_float_2::is_ignored_relaxed, |
| split_float_2::examples_relaxed); |
| } |
| |
| // Generated from: split_float_3.mod.py. |
| namespace split_float_3 { |
| // Generated split_float_3 test |
| #include "examples/split_float_3.example.cpp" |
| // Generated model constructor |
| #include "vts_models/split_float_3.model.cpp" |
| } // namespace split_float_3 |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_3) { |
| generated_tests::Execute(device, |
| split_float_3::createTestModel, |
| split_float_3::is_ignored, |
| split_float_3::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_3_relaxed) { |
| generated_tests::Execute(device, |
| split_float_3::createTestModel_relaxed, |
| split_float_3::is_ignored_relaxed, |
| split_float_3::examples_relaxed); |
| } |
| |
| // Generated from: split_float_4.mod.py. |
| namespace split_float_4 { |
| // Generated split_float_4 test |
| #include "examples/split_float_4.example.cpp" |
| // Generated model constructor |
| #include "vts_models/split_float_4.model.cpp" |
| } // namespace split_float_4 |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_4) { |
| generated_tests::Execute(device, |
| split_float_4::createTestModel, |
| split_float_4::is_ignored, |
| split_float_4::examples); |
| } |
| |
| TEST_F(NeuralnetworksHidlTest, split_float_4_relaxed) { |
| generated_tests::Execute(device, |
| split_float_4::createTestModel_relaxed, |
| split_float_4::is_ignored_relaxed, |
| split_float_4::examples_relaxed); |
| } |
| |
// Generated from: split_float_5.mod.py.
namespace split_float_5 {
// Generated split_float_5 test
#include "examples/split_float_5.example.cpp"
// Generated model constructor
#include "vts_models/split_float_5.model.cpp"
} // namespace split_float_5

// Runs the split_float_5 model (and its relaxed-float-precision variant)
// against the generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_float_5) {
    generated_tests::Execute(device,
                             split_float_5::createTestModel,
                             split_float_5::is_ignored,
                             split_float_5::examples);
}

TEST_F(NeuralnetworksHidlTest, split_float_5_relaxed) {
    generated_tests::Execute(device,
                             split_float_5::createTestModel_relaxed,
                             split_float_5::is_ignored_relaxed,
                             split_float_5::examples_relaxed);
}
| |
// Generated from: split_int32_1.mod.py.
namespace split_int32_1 {
// Generated split_int32_1 test
#include "examples/split_int32_1.example.cpp"
// Generated model constructor
#include "vts_models/split_int32_1.model.cpp"
} // namespace split_int32_1

// Runs the split_int32_1 model (and its relaxed variant) against the
// generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_int32_1) {
    generated_tests::Execute(device,
                             split_int32_1::createTestModel,
                             split_int32_1::is_ignored,
                             split_int32_1::examples);
}

TEST_F(NeuralnetworksHidlTest, split_int32_1_relaxed) {
    generated_tests::Execute(device,
                             split_int32_1::createTestModel_relaxed,
                             split_int32_1::is_ignored_relaxed,
                             split_int32_1::examples_relaxed);
}
| |
// Generated from: split_int32_2.mod.py.
namespace split_int32_2 {
// Generated split_int32_2 test
#include "examples/split_int32_2.example.cpp"
// Generated model constructor
#include "vts_models/split_int32_2.model.cpp"
} // namespace split_int32_2

// Runs the split_int32_2 model (and its relaxed variant) against the
// generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_int32_2) {
    generated_tests::Execute(device,
                             split_int32_2::createTestModel,
                             split_int32_2::is_ignored,
                             split_int32_2::examples);
}

TEST_F(NeuralnetworksHidlTest, split_int32_2_relaxed) {
    generated_tests::Execute(device,
                             split_int32_2::createTestModel_relaxed,
                             split_int32_2::is_ignored_relaxed,
                             split_int32_2::examples_relaxed);
}
| |
// Generated from: split_int32_3.mod.py.
namespace split_int32_3 {
// Generated split_int32_3 test
#include "examples/split_int32_3.example.cpp"
// Generated model constructor
#include "vts_models/split_int32_3.model.cpp"
} // namespace split_int32_3

// Runs the split_int32_3 model (and its relaxed variant) against the
// generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_int32_3) {
    generated_tests::Execute(device,
                             split_int32_3::createTestModel,
                             split_int32_3::is_ignored,
                             split_int32_3::examples);
}

TEST_F(NeuralnetworksHidlTest, split_int32_3_relaxed) {
    generated_tests::Execute(device,
                             split_int32_3::createTestModel_relaxed,
                             split_int32_3::is_ignored_relaxed,
                             split_int32_3::examples_relaxed);
}
| |
// Generated from: split_int32_4.mod.py.
namespace split_int32_4 {
// Generated split_int32_4 test
#include "examples/split_int32_4.example.cpp"
// Generated model constructor
#include "vts_models/split_int32_4.model.cpp"
} // namespace split_int32_4

// Runs the split_int32_4 model (and its relaxed variant) against the
// generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_int32_4) {
    generated_tests::Execute(device,
                             split_int32_4::createTestModel,
                             split_int32_4::is_ignored,
                             split_int32_4::examples);
}

TEST_F(NeuralnetworksHidlTest, split_int32_4_relaxed) {
    generated_tests::Execute(device,
                             split_int32_4::createTestModel_relaxed,
                             split_int32_4::is_ignored_relaxed,
                             split_int32_4::examples_relaxed);
}
| |
// Generated from: split_quant8_1.mod.py.
namespace split_quant8_1 {
// Generated split_quant8_1 test
#include "examples/split_quant8_1.example.cpp"
// Generated model constructor
#include "vts_models/split_quant8_1.model.cpp"
} // namespace split_quant8_1

// Runs the quantized (quant8, per the suffix) split model and its relaxed
// variant against the generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_quant8_1) {
    generated_tests::Execute(device,
                             split_quant8_1::createTestModel,
                             split_quant8_1::is_ignored,
                             split_quant8_1::examples);
}

TEST_F(NeuralnetworksHidlTest, split_quant8_1_relaxed) {
    generated_tests::Execute(device,
                             split_quant8_1::createTestModel_relaxed,
                             split_quant8_1::is_ignored_relaxed,
                             split_quant8_1::examples_relaxed);
}
| |
// Generated from: split_quant8_2.mod.py.
namespace split_quant8_2 {
// Generated split_quant8_2 test
#include "examples/split_quant8_2.example.cpp"
// Generated model constructor
#include "vts_models/split_quant8_2.model.cpp"
} // namespace split_quant8_2

// Runs the quantized split_quant8_2 model and its relaxed variant against
// the generated reference examples included above.
TEST_F(NeuralnetworksHidlTest, split_quant8_2) {
    generated_tests::Execute(device,
                             split_quant8_2::createTestModel,
                             split_quant8_2::is_ignored,
                             split_quant8_2::examples);
}

TEST_F(NeuralnetworksHidlTest, split_quant8_2_relaxed) {
    generated_tests::Execute(device,
                             split_quant8_2::createTestModel_relaxed,
                             split_quant8_2::is_ignored_relaxed,
                             split_quant8_2::examples_relaxed);
}
| |
// Generated from: split_quant8_3.mod.py.
namespace split_quant8_3 {
// Generated split_quant8_3 test
#include "examples/split_quant8_3.example.cpp"
// Generated model constructor
#include "vts_models/split_quant8_3.model.cpp"
} // namespace split_quant8_3

// Runs the quantized split_quant8_3 model against its generated reference
// examples. Note: unlike quant8_1/quant8_2, the generator emitted no
// _relaxed variant for this model.
TEST_F(NeuralnetworksHidlTest, split_quant8_3) {
    generated_tests::Execute(device,
                             split_quant8_3::createTestModel,
                             split_quant8_3::is_ignored,
                             split_quant8_3::examples);
}
| |
// Generated from: split_quant8_4.mod.py.
namespace split_quant8_4 {
// Generated split_quant8_4 test
#include "examples/split_quant8_4.example.cpp"
// Generated model constructor
#include "vts_models/split_quant8_4.model.cpp"
} // namespace split_quant8_4

// Runs the quantized split_quant8_4 model against its generated reference
// examples (no _relaxed variant emitted for this model either).
TEST_F(NeuralnetworksHidlTest, split_quant8_4) {
    generated_tests::Execute(device,
                             split_quant8_4::createTestModel,
                             split_quant8_4::is_ignored,
                             split_quant8_4::examples);
}
| |
// Generated from: tile_1.mod.py.
namespace tile_1 {
// Generated tile_1 test
#include "examples/tile_1.example.cpp"
// Generated model constructor
#include "vts_models/tile_1.model.cpp"
} // namespace tile_1

// tile_1 variants: base float model, _relaxed (relaxed float precision),
// and _quant8 (quantized data type, per the suffix). All symbols live in
// the tile_1 namespace populated by the includes above.
TEST_F(NeuralnetworksHidlTest, tile_1) {
    generated_tests::Execute(device,
                             tile_1::createTestModel,
                             tile_1::is_ignored,
                             tile_1::examples);
}

TEST_F(NeuralnetworksHidlTest, tile_1_relaxed) {
    generated_tests::Execute(device,
                             tile_1::createTestModel_relaxed,
                             tile_1::is_ignored_relaxed,
                             tile_1::examples_relaxed);
}

TEST_F(NeuralnetworksHidlTest, tile_1_quant8) {
    generated_tests::Execute(device,
                             tile_1::createTestModel_quant8,
                             tile_1::is_ignored_quant8,
                             tile_1::examples_quant8);
}
| |
// Generated from: tile_2.mod.py.
namespace tile_2 {
// Generated tile_2 test
#include "examples/tile_2.example.cpp"
// Generated model constructor
#include "vts_models/tile_2.model.cpp"
} // namespace tile_2

// tile_2 variants: base, _relaxed (relaxed float precision), _quant8
// (quantized), and _int32 (int32 data type) — one TEST_F per generated
// model/example set.
TEST_F(NeuralnetworksHidlTest, tile_2) {
    generated_tests::Execute(device,
                             tile_2::createTestModel,
                             tile_2::is_ignored,
                             tile_2::examples);
}

TEST_F(NeuralnetworksHidlTest, tile_2_relaxed) {
    generated_tests::Execute(device,
                             tile_2::createTestModel_relaxed,
                             tile_2::is_ignored_relaxed,
                             tile_2::examples_relaxed);
}

TEST_F(NeuralnetworksHidlTest, tile_2_quant8) {
    generated_tests::Execute(device,
                             tile_2::createTestModel_quant8,
                             tile_2::is_ignored_quant8,
                             tile_2::examples_quant8);
}

TEST_F(NeuralnetworksHidlTest, tile_2_int32) {
    generated_tests::Execute(device,
                             tile_2::createTestModel_int32,
                             tile_2::is_ignored_int32,
                             tile_2::examples_int32);
}
| |
// Generated from: tile_3.mod.py.
namespace tile_3 {
// Generated tile_3 test
#include "examples/tile_3.example.cpp"
// Generated model constructor
#include "vts_models/tile_3.model.cpp"
} // namespace tile_3

// tile_3 variants mirror tile_2: base, _relaxed, _quant8, and _int32.
TEST_F(NeuralnetworksHidlTest, tile_3) {
    generated_tests::Execute(device,
                             tile_3::createTestModel,
                             tile_3::is_ignored,
                             tile_3::examples);
}

TEST_F(NeuralnetworksHidlTest, tile_3_relaxed) {
    generated_tests::Execute(device,
                             tile_3::createTestModel_relaxed,
                             tile_3::is_ignored_relaxed,
                             tile_3::examples_relaxed);
}

TEST_F(NeuralnetworksHidlTest, tile_3_quant8) {
    generated_tests::Execute(device,
                             tile_3::createTestModel_quant8,
                             tile_3::is_ignored_quant8,
                             tile_3::examples_quant8);
}

TEST_F(NeuralnetworksHidlTest, tile_3_int32) {
    generated_tests::Execute(device,
                             tile_3::createTestModel_int32,
                             tile_3::is_ignored_int32,
                             tile_3::examples_int32);
}
| |
// Generated from: transpose_conv2d.mod.py.
// The two includes below define, inside this namespace, every
// createTestModel*, is_ignored*, and examples* symbol referenced by the
// transpose_conv2d TEST_F cases in this file (five numbered test cases,
// each with weight_as_input / relaxed / quant8 variants).
namespace transpose_conv2d {
// Generated transpose_conv2d test
#include "examples/transpose_conv2d.example.cpp"
// Generated model constructor
#include "vts_models/transpose_conv2d.model.cpp"
} // namespace transpose_conv2d
| |
// Test case 1 from transpose_conv2d.mod.py: base, _weight_as_input
// (weights supplied as runtime inputs rather than constants — presumably,
// per the generator's suffix convention), _relaxed (relaxed float
// precision), and _quant8 (quantized) variants.
TEST_F(NeuralnetworksHidlTest, transpose_conv2d) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel,
                             transpose_conv2d::is_ignored,
                             transpose_conv2d::examples);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_weight_as_input) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_weight_as_input,
                             transpose_conv2d::is_ignored_weight_as_input,
                             transpose_conv2d::examples_weight_as_input);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed,
                             transpose_conv2d::is_ignored_relaxed,
                             transpose_conv2d::examples_relaxed);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_weight_as_input) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_weight_as_input,
                             transpose_conv2d::is_ignored_relaxed_weight_as_input,
                             transpose_conv2d::examples_relaxed_weight_as_input);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8,
                             transpose_conv2d::is_ignored_quant8,
                             transpose_conv2d::examples_quant8);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_weight_as_input) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_weight_as_input,
                             transpose_conv2d::is_ignored_quant8_weight_as_input,
                             transpose_conv2d::examples_quant8_weight_as_input);
}
| |
// Test case 2 (_2 suffix) from transpose_conv2d.mod.py, same six-variant
// layout as test case 1.
TEST_F(NeuralnetworksHidlTest, transpose_conv2d_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_2,
                             transpose_conv2d::is_ignored_2,
                             transpose_conv2d::examples_2);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_weight_as_input_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_weight_as_input_2,
                             transpose_conv2d::is_ignored_weight_as_input_2,
                             transpose_conv2d::examples_weight_as_input_2);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_2,
                             transpose_conv2d::is_ignored_relaxed_2,
                             transpose_conv2d::examples_relaxed_2);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_weight_as_input_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_weight_as_input_2,
                             transpose_conv2d::is_ignored_relaxed_weight_as_input_2,
                             transpose_conv2d::examples_relaxed_weight_as_input_2);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_2,
                             transpose_conv2d::is_ignored_quant8_2,
                             transpose_conv2d::examples_quant8_2);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_weight_as_input_2) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_weight_as_input_2,
                             transpose_conv2d::is_ignored_quant8_weight_as_input_2,
                             transpose_conv2d::examples_quant8_weight_as_input_2);
}
| |
// Test case 3 (_3 suffix) from transpose_conv2d.mod.py, same six-variant
// layout as test case 1.
TEST_F(NeuralnetworksHidlTest, transpose_conv2d_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_3,
                             transpose_conv2d::is_ignored_3,
                             transpose_conv2d::examples_3);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_weight_as_input_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_weight_as_input_3,
                             transpose_conv2d::is_ignored_weight_as_input_3,
                             transpose_conv2d::examples_weight_as_input_3);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_3,
                             transpose_conv2d::is_ignored_relaxed_3,
                             transpose_conv2d::examples_relaxed_3);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_weight_as_input_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_weight_as_input_3,
                             transpose_conv2d::is_ignored_relaxed_weight_as_input_3,
                             transpose_conv2d::examples_relaxed_weight_as_input_3);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_3,
                             transpose_conv2d::is_ignored_quant8_3,
                             transpose_conv2d::examples_quant8_3);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_weight_as_input_3) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_weight_as_input_3,
                             transpose_conv2d::is_ignored_quant8_weight_as_input_3,
                             transpose_conv2d::examples_quant8_weight_as_input_3);
}
| |
// Test case 4 (_4 suffix) from transpose_conv2d.mod.py, same six-variant
// layout as test case 1.
TEST_F(NeuralnetworksHidlTest, transpose_conv2d_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_4,
                             transpose_conv2d::is_ignored_4,
                             transpose_conv2d::examples_4);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_weight_as_input_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_weight_as_input_4,
                             transpose_conv2d::is_ignored_weight_as_input_4,
                             transpose_conv2d::examples_weight_as_input_4);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_4,
                             transpose_conv2d::is_ignored_relaxed_4,
                             transpose_conv2d::examples_relaxed_4);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_weight_as_input_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_weight_as_input_4,
                             transpose_conv2d::is_ignored_relaxed_weight_as_input_4,
                             transpose_conv2d::examples_relaxed_weight_as_input_4);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_4,
                             transpose_conv2d::is_ignored_quant8_4,
                             transpose_conv2d::examples_quant8_4);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_weight_as_input_4) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_weight_as_input_4,
                             transpose_conv2d::is_ignored_quant8_weight_as_input_4,
                             transpose_conv2d::examples_quant8_weight_as_input_4);
}
| |
// Test case 5 (_5 suffix) from transpose_conv2d.mod.py, same six-variant
// layout as test case 1.
TEST_F(NeuralnetworksHidlTest, transpose_conv2d_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_5,
                             transpose_conv2d::is_ignored_5,
                             transpose_conv2d::examples_5);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_weight_as_input_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_weight_as_input_5,
                             transpose_conv2d::is_ignored_weight_as_input_5,
                             transpose_conv2d::examples_weight_as_input_5);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_5,
                             transpose_conv2d::is_ignored_relaxed_5,
                             transpose_conv2d::examples_relaxed_5);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_relaxed_weight_as_input_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_relaxed_weight_as_input_5,
                             transpose_conv2d::is_ignored_relaxed_weight_as_input_5,
                             transpose_conv2d::examples_relaxed_weight_as_input_5);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_5,
                             transpose_conv2d::is_ignored_quant8_5,
                             transpose_conv2d::examples_quant8_5);
}

TEST_F(NeuralnetworksHidlTest, transpose_conv2d_quant8_weight_as_input_5) {
    generated_tests::Execute(device,
                             transpose_conv2d::createTestModel_quant8_weight_as_input_5,
                             transpose_conv2d::is_ignored_quant8_weight_as_input_5,
                             transpose_conv2d::examples_quant8_weight_as_input_5);
}
| |