| // DO NOT EDIT; |
| // Generated by ml/nn/runtime/test/specs/generate_vts_test.sh |
| |
// --- add tests -------------------------------------------------------------
// Each namespace below bundles generated example data ("examples", built by
// textually including an .example.cpp fragment into the vector's initializer)
// with a generated model constructor (createTestModel / is_ignored come from
// the included vts_models/*.model.cpp). The paired TEST_F runs that model on
// the HIDL device under test and checks its outputs against the examples via
// generated_tests::Execute. The includes must stay exactly where they are:
// they are spliced into the enclosing scope by the preprocessor.
namespace add_broadcast_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated add_broadcast_quant8 test
#include "examples/add_broadcast_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/add_broadcast_quant8.model.cpp"
} // namespace add_broadcast_quant8
TEST_F(NeuralnetworksHidlTest, add_broadcast_quant8) {
generated_tests::Execute(device,
add_broadcast_quant8::createTestModel,
add_broadcast_quant8::is_ignored,
add_broadcast_quant8::examples);
}

namespace add {
std::vector<MixedTypedExample> examples = {
// Generated add test
#include "examples/add.example.cpp"
};
// Generated model constructor
#include "vts_models/add.model.cpp"
} // namespace add
TEST_F(NeuralnetworksHidlTest, add) {
generated_tests::Execute(device,
add::createTestModel,
add::is_ignored,
add::examples);
}

namespace add_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated add_quant8 test
#include "examples/add_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/add_quant8.model.cpp"
} // namespace add_quant8
TEST_F(NeuralnetworksHidlTest, add_quant8) {
generated_tests::Execute(device,
add_quant8::createTestModel,
add_quant8::is_ignored,
add_quant8::examples);
}
| |
// --- avg_pool tests --------------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace avg_pool_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_float_1 test
#include "examples/avg_pool_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_float_1.model.cpp"
} // namespace avg_pool_float_1
TEST_F(NeuralnetworksHidlTest, avg_pool_float_1) {
generated_tests::Execute(device,
avg_pool_float_1::createTestModel,
avg_pool_float_1::is_ignored,
avg_pool_float_1::examples);
}

namespace avg_pool_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_float_2 test
#include "examples/avg_pool_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_float_2.model.cpp"
} // namespace avg_pool_float_2
TEST_F(NeuralnetworksHidlTest, avg_pool_float_2) {
generated_tests::Execute(device,
avg_pool_float_2::createTestModel,
avg_pool_float_2::is_ignored,
avg_pool_float_2::examples);
}

namespace avg_pool_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_float_3 test
#include "examples/avg_pool_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_float_3.model.cpp"
} // namespace avg_pool_float_3
TEST_F(NeuralnetworksHidlTest, avg_pool_float_3) {
generated_tests::Execute(device,
avg_pool_float_3::createTestModel,
avg_pool_float_3::is_ignored,
avg_pool_float_3::examples);
}

namespace avg_pool_float_4 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_float_4 test
#include "examples/avg_pool_float_4.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_float_4.model.cpp"
} // namespace avg_pool_float_4
TEST_F(NeuralnetworksHidlTest, avg_pool_float_4) {
generated_tests::Execute(device,
avg_pool_float_4::createTestModel,
avg_pool_float_4::is_ignored,
avg_pool_float_4::examples);
}

namespace avg_pool_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_quant8_1 test
#include "examples/avg_pool_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_quant8_1.model.cpp"
} // namespace avg_pool_quant8_1
TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_1) {
generated_tests::Execute(device,
avg_pool_quant8_1::createTestModel,
avg_pool_quant8_1::is_ignored,
avg_pool_quant8_1::examples);
}

namespace avg_pool_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_quant8_2 test
#include "examples/avg_pool_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_quant8_2.model.cpp"
} // namespace avg_pool_quant8_2
TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_2) {
generated_tests::Execute(device,
avg_pool_quant8_2::createTestModel,
avg_pool_quant8_2::is_ignored,
avg_pool_quant8_2::examples);
}

namespace avg_pool_quant8_3 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_quant8_3 test
#include "examples/avg_pool_quant8_3.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_quant8_3.model.cpp"
} // namespace avg_pool_quant8_3
TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_3) {
generated_tests::Execute(device,
avg_pool_quant8_3::createTestModel,
avg_pool_quant8_3::is_ignored,
avg_pool_quant8_3::examples);
}

namespace avg_pool_quant8_4 {
std::vector<MixedTypedExample> examples = {
// Generated avg_pool_quant8_4 test
#include "examples/avg_pool_quant8_4.example.cpp"
};
// Generated model constructor
#include "vts_models/avg_pool_quant8_4.model.cpp"
} // namespace avg_pool_quant8_4
TEST_F(NeuralnetworksHidlTest, avg_pool_quant8_4) {
generated_tests::Execute(device,
avg_pool_quant8_4::createTestModel,
avg_pool_quant8_4::is_ignored,
avg_pool_quant8_4::examples);
}
| |
// --- concat tests ----------------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace concat_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated concat_float_1 test
#include "examples/concat_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_float_1.model.cpp"
} // namespace concat_float_1
TEST_F(NeuralnetworksHidlTest, concat_float_1) {
generated_tests::Execute(device,
concat_float_1::createTestModel,
concat_float_1::is_ignored,
concat_float_1::examples);
}

namespace concat_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated concat_float_2 test
#include "examples/concat_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_float_2.model.cpp"
} // namespace concat_float_2
TEST_F(NeuralnetworksHidlTest, concat_float_2) {
generated_tests::Execute(device,
concat_float_2::createTestModel,
concat_float_2::is_ignored,
concat_float_2::examples);
}

namespace concat_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated concat_float_3 test
#include "examples/concat_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_float_3.model.cpp"
} // namespace concat_float_3
TEST_F(NeuralnetworksHidlTest, concat_float_3) {
generated_tests::Execute(device,
concat_float_3::createTestModel,
concat_float_3::is_ignored,
concat_float_3::examples);
}

namespace concat_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated concat_quant8_1 test
#include "examples/concat_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_quant8_1.model.cpp"
} // namespace concat_quant8_1
TEST_F(NeuralnetworksHidlTest, concat_quant8_1) {
generated_tests::Execute(device,
concat_quant8_1::createTestModel,
concat_quant8_1::is_ignored,
concat_quant8_1::examples);
}

namespace concat_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated concat_quant8_2 test
#include "examples/concat_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_quant8_2.model.cpp"
} // namespace concat_quant8_2
TEST_F(NeuralnetworksHidlTest, concat_quant8_2) {
generated_tests::Execute(device,
concat_quant8_2::createTestModel,
concat_quant8_2::is_ignored,
concat_quant8_2::examples);
}

namespace concat_quant8_3 {
std::vector<MixedTypedExample> examples = {
// Generated concat_quant8_3 test
#include "examples/concat_quant8_3.example.cpp"
};
// Generated model constructor
#include "vts_models/concat_quant8_3.model.cpp"
} // namespace concat_quant8_3
TEST_F(NeuralnetworksHidlTest, concat_quant8_3) {
generated_tests::Execute(device,
concat_quant8_3::createTestModel,
concat_quant8_3::is_ignored,
concat_quant8_3::examples);
}
| |
// --- conv tests ------------------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace conv_float_channels {
std::vector<MixedTypedExample> examples = {
// Generated conv_float_channels test
#include "examples/conv_float_channels.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_float_channels.model.cpp"
} // namespace conv_float_channels
TEST_F(NeuralnetworksHidlTest, conv_float_channels) {
generated_tests::Execute(device,
conv_float_channels::createTestModel,
conv_float_channels::is_ignored,
conv_float_channels::examples);
}

namespace conv_float_large {
std::vector<MixedTypedExample> examples = {
// Generated conv_float_large test
#include "examples/conv_float_large.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_float_large.model.cpp"
} // namespace conv_float_large
TEST_F(NeuralnetworksHidlTest, conv_float_large) {
generated_tests::Execute(device,
conv_float_large::createTestModel,
conv_float_large::is_ignored,
conv_float_large::examples);
}

namespace conv_float {
std::vector<MixedTypedExample> examples = {
// Generated conv_float test
#include "examples/conv_float.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_float.model.cpp"
} // namespace conv_float
TEST_F(NeuralnetworksHidlTest, conv_float) {
generated_tests::Execute(device,
conv_float::createTestModel,
conv_float::is_ignored,
conv_float::examples);
}

namespace conv_quant8_channels {
std::vector<MixedTypedExample> examples = {
// Generated conv_quant8_channels test
#include "examples/conv_quant8_channels.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_quant8_channels.model.cpp"
} // namespace conv_quant8_channels
TEST_F(NeuralnetworksHidlTest, conv_quant8_channels) {
generated_tests::Execute(device,
conv_quant8_channels::createTestModel,
conv_quant8_channels::is_ignored,
conv_quant8_channels::examples);
}

namespace conv_quant8_large {
std::vector<MixedTypedExample> examples = {
// Generated conv_quant8_large test
#include "examples/conv_quant8_large.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_quant8_large.model.cpp"
} // namespace conv_quant8_large
TEST_F(NeuralnetworksHidlTest, conv_quant8_large) {
generated_tests::Execute(device,
conv_quant8_large::createTestModel,
conv_quant8_large::is_ignored,
conv_quant8_large::examples);
}

namespace conv_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated conv_quant8 test
#include "examples/conv_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_quant8.model.cpp"
} // namespace conv_quant8
TEST_F(NeuralnetworksHidlTest, conv_quant8) {
generated_tests::Execute(device,
conv_quant8::createTestModel,
conv_quant8::is_ignored,
conv_quant8::examples);
}

namespace conv_quant8_overflow {
std::vector<MixedTypedExample> examples = {
// Generated conv_quant8_overflow test
#include "examples/conv_quant8_overflow.example.cpp"
};
// Generated model constructor
#include "vts_models/conv_quant8_overflow.model.cpp"
} // namespace conv_quant8_overflow
TEST_F(NeuralnetworksHidlTest, conv_quant8_overflow) {
generated_tests::Execute(device,
conv_quant8_overflow::createTestModel,
conv_quant8_overflow::is_ignored,
conv_quant8_overflow::examples);
}
| |
// --- depth_to_space tests --------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace depth_to_space_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated depth_to_space_float_1 test
#include "examples/depth_to_space_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/depth_to_space_float_1.model.cpp"
} // namespace depth_to_space_float_1
TEST_F(NeuralnetworksHidlTest, depth_to_space_float_1) {
generated_tests::Execute(device,
depth_to_space_float_1::createTestModel,
depth_to_space_float_1::is_ignored,
depth_to_space_float_1::examples);
}

namespace depth_to_space_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated depth_to_space_float_2 test
#include "examples/depth_to_space_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/depth_to_space_float_2.model.cpp"
} // namespace depth_to_space_float_2
TEST_F(NeuralnetworksHidlTest, depth_to_space_float_2) {
generated_tests::Execute(device,
depth_to_space_float_2::createTestModel,
depth_to_space_float_2::is_ignored,
depth_to_space_float_2::examples);
}

namespace depth_to_space_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated depth_to_space_quant8_1 test
#include "examples/depth_to_space_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/depth_to_space_quant8_1.model.cpp"
} // namespace depth_to_space_quant8_1
TEST_F(NeuralnetworksHidlTest, depth_to_space_quant8_1) {
generated_tests::Execute(device,
depth_to_space_quant8_1::createTestModel,
depth_to_space_quant8_1::is_ignored,
depth_to_space_quant8_1::examples);
}

namespace depth_to_space_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated depth_to_space_quant8_2 test
#include "examples/depth_to_space_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/depth_to_space_quant8_2.model.cpp"
} // namespace depth_to_space_quant8_2
TEST_F(NeuralnetworksHidlTest, depth_to_space_quant8_2) {
generated_tests::Execute(device,
depth_to_space_quant8_2::createTestModel,
depth_to_space_quant8_2::is_ignored,
depth_to_space_quant8_2::examples);
}
| |
// --- depthwise_conv2d tests ------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace depthwise_conv2d_float_large_2 {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_float_large_2 test
#include "examples/depthwise_conv2d_float_large_2.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_float_large_2.model.cpp"
} // namespace depthwise_conv2d_float_large_2
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large_2) {
generated_tests::Execute(device,
depthwise_conv2d_float_large_2::createTestModel,
depthwise_conv2d_float_large_2::is_ignored,
depthwise_conv2d_float_large_2::examples);
}

namespace depthwise_conv2d_float_large {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_float_large test
#include "examples/depthwise_conv2d_float_large.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_float_large.model.cpp"
} // namespace depthwise_conv2d_float_large
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float_large) {
generated_tests::Execute(device,
depthwise_conv2d_float_large::createTestModel,
depthwise_conv2d_float_large::is_ignored,
depthwise_conv2d_float_large::examples);
}

namespace depthwise_conv2d_float {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_float test
#include "examples/depthwise_conv2d_float.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_float.model.cpp"
} // namespace depthwise_conv2d_float
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_float) {
generated_tests::Execute(device,
depthwise_conv2d_float::createTestModel,
depthwise_conv2d_float::is_ignored,
depthwise_conv2d_float::examples);
}

namespace depthwise_conv2d_quant8_large {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8_large test
#include "examples/depthwise_conv2d_quant8_large.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8_large.model.cpp"
} // namespace depthwise_conv2d_quant8_large
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8_large) {
generated_tests::Execute(device,
depthwise_conv2d_quant8_large::createTestModel,
depthwise_conv2d_quant8_large::is_ignored,
depthwise_conv2d_quant8_large::examples);
}

namespace depthwise_conv2d_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated depthwise_conv2d_quant8 test
#include "examples/depthwise_conv2d_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/depthwise_conv2d_quant8.model.cpp"
} // namespace depthwise_conv2d_quant8
TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8) {
generated_tests::Execute(device,
depthwise_conv2d_quant8::createTestModel,
depthwise_conv2d_quant8::is_ignored,
depthwise_conv2d_quant8::examples);
}
| |
// --- dequantize test -------------------------------------------------------
// Generated test unit: the namespace holds example data plus an included model
// constructor (createTestModel / is_ignored from vts_models/*.model.cpp); the
// TEST_F executes the model on the HIDL device and validates the outputs
// against the examples via generated_tests::Execute.
namespace dequantize {
std::vector<MixedTypedExample> examples = {
// Generated dequantize test
#include "examples/dequantize.example.cpp"
};
// Generated model constructor
#include "vts_models/dequantize.model.cpp"
} // namespace dequantize
TEST_F(NeuralnetworksHidlTest, dequantize) {
generated_tests::Execute(device,
dequantize::createTestModel,
dequantize::is_ignored,
dequantize::examples);
}
| |
// --- embedding_lookup test -------------------------------------------------
// Generated test unit: the namespace holds example data plus an included model
// constructor (createTestModel / is_ignored from vts_models/*.model.cpp); the
// TEST_F executes the model on the HIDL device and validates the outputs
// against the examples via generated_tests::Execute.
namespace embedding_lookup {
std::vector<MixedTypedExample> examples = {
// Generated embedding_lookup test
#include "examples/embedding_lookup.example.cpp"
};
// Generated model constructor
#include "vts_models/embedding_lookup.model.cpp"
} // namespace embedding_lookup
TEST_F(NeuralnetworksHidlTest, embedding_lookup) {
generated_tests::Execute(device,
embedding_lookup::createTestModel,
embedding_lookup::is_ignored,
embedding_lookup::examples);
}
| |
// --- floor test ------------------------------------------------------------
// Generated test unit: the namespace holds example data plus an included model
// constructor (createTestModel / is_ignored from vts_models/*.model.cpp); the
// TEST_F executes the model on the HIDL device and validates the outputs
// against the examples via generated_tests::Execute.
namespace floor {
std::vector<MixedTypedExample> examples = {
// Generated floor test
#include "examples/floor.example.cpp"
};
// Generated model constructor
#include "vts_models/floor.model.cpp"
} // namespace floor
TEST_F(NeuralnetworksHidlTest, floor) {
generated_tests::Execute(device,
floor::createTestModel,
floor::is_ignored,
floor::examples);
}
| |
// --- fully_connected tests -------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace fully_connected_float_large {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_large test
#include "examples/fully_connected_float_large.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float_large.model.cpp"
} // namespace fully_connected_float_large
TEST_F(NeuralnetworksHidlTest, fully_connected_float_large) {
generated_tests::Execute(device,
fully_connected_float_large::createTestModel,
fully_connected_float_large::is_ignored,
fully_connected_float_large::examples);
}

namespace fully_connected_float {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float test
#include "examples/fully_connected_float.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_float.model.cpp"
} // namespace fully_connected_float
TEST_F(NeuralnetworksHidlTest, fully_connected_float) {
generated_tests::Execute(device,
fully_connected_float::createTestModel,
fully_connected_float::is_ignored,
fully_connected_float::examples);
}

namespace fully_connected_quant8_large {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8_large test
#include "examples/fully_connected_quant8_large.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8_large.model.cpp"
} // namespace fully_connected_quant8_large
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8_large) {
generated_tests::Execute(device,
fully_connected_quant8_large::createTestModel,
fully_connected_quant8_large::is_ignored,
fully_connected_quant8_large::examples);
}

namespace fully_connected_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_quant8 test
#include "examples/fully_connected_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/fully_connected_quant8.model.cpp"
} // namespace fully_connected_quant8
TEST_F(NeuralnetworksHidlTest, fully_connected_quant8) {
generated_tests::Execute(device,
fully_connected_quant8::createTestModel,
fully_connected_quant8::is_ignored,
fully_connected_quant8::examples);
}
| |
// --- hashtable_lookup tests ------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace hashtable_lookup_float {
std::vector<MixedTypedExample> examples = {
// Generated hashtable_lookup_float test
#include "examples/hashtable_lookup_float.example.cpp"
};
// Generated model constructor
#include "vts_models/hashtable_lookup_float.model.cpp"
} // namespace hashtable_lookup_float
TEST_F(NeuralnetworksHidlTest, hashtable_lookup_float) {
generated_tests::Execute(device,
hashtable_lookup_float::createTestModel,
hashtable_lookup_float::is_ignored,
hashtable_lookup_float::examples);
}

namespace hashtable_lookup_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated hashtable_lookup_quant8 test
#include "examples/hashtable_lookup_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/hashtable_lookup_quant8.model.cpp"
} // namespace hashtable_lookup_quant8
TEST_F(NeuralnetworksHidlTest, hashtable_lookup_quant8) {
generated_tests::Execute(device,
hashtable_lookup_quant8::createTestModel,
hashtable_lookup_quant8::is_ignored,
hashtable_lookup_quant8::examples);
}
| |
// --- l2_normalization / l2_pool tests --------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace l2_normalization {
std::vector<MixedTypedExample> examples = {
// Generated l2_normalization test
#include "examples/l2_normalization.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_normalization.model.cpp"
} // namespace l2_normalization
TEST_F(NeuralnetworksHidlTest, l2_normalization) {
generated_tests::Execute(device,
l2_normalization::createTestModel,
l2_normalization::is_ignored,
l2_normalization::examples);
}

namespace l2_pool_float {
std::vector<MixedTypedExample> examples = {
// Generated l2_pool_float test
#include "examples/l2_pool_float.example.cpp"
};
// Generated model constructor
#include "vts_models/l2_pool_float.model.cpp"
} // namespace l2_pool_float
TEST_F(NeuralnetworksHidlTest, l2_pool_float) {
generated_tests::Execute(device,
l2_pool_float::createTestModel,
l2_pool_float::is_ignored,
l2_pool_float::examples);
}
| |
// --- local_response_norm tests ---------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace local_response_norm_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_1 test
#include "examples/local_response_norm_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_1.model.cpp"
} // namespace local_response_norm_float_1
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_1) {
generated_tests::Execute(device,
local_response_norm_float_1::createTestModel,
local_response_norm_float_1::is_ignored,
local_response_norm_float_1::examples);
}

namespace local_response_norm_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_2 test
#include "examples/local_response_norm_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_2.model.cpp"
} // namespace local_response_norm_float_2
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_2) {
generated_tests::Execute(device,
local_response_norm_float_2::createTestModel,
local_response_norm_float_2::is_ignored,
local_response_norm_float_2::examples);
}

namespace local_response_norm_float_3 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_3 test
#include "examples/local_response_norm_float_3.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_3.model.cpp"
} // namespace local_response_norm_float_3
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_3) {
generated_tests::Execute(device,
local_response_norm_float_3::createTestModel,
local_response_norm_float_3::is_ignored,
local_response_norm_float_3::examples);
}

namespace local_response_norm_float_4 {
std::vector<MixedTypedExample> examples = {
// Generated local_response_norm_float_4 test
#include "examples/local_response_norm_float_4.example.cpp"
};
// Generated model constructor
#include "vts_models/local_response_norm_float_4.model.cpp"
} // namespace local_response_norm_float_4
TEST_F(NeuralnetworksHidlTest, local_response_norm_float_4) {
generated_tests::Execute(device,
local_response_norm_float_4::createTestModel,
local_response_norm_float_4::is_ignored,
local_response_norm_float_4::examples);
}
| |
// --- logistic tests --------------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace logistic_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_float_1 test
#include "examples/logistic_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_float_1.model.cpp"
} // namespace logistic_float_1
TEST_F(NeuralnetworksHidlTest, logistic_float_1) {
generated_tests::Execute(device,
logistic_float_1::createTestModel,
logistic_float_1::is_ignored,
logistic_float_1::examples);
}

namespace logistic_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_float_2 test
#include "examples/logistic_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_float_2.model.cpp"
} // namespace logistic_float_2
TEST_F(NeuralnetworksHidlTest, logistic_float_2) {
generated_tests::Execute(device,
logistic_float_2::createTestModel,
logistic_float_2::is_ignored,
logistic_float_2::examples);
}

namespace logistic_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_quant8_1 test
#include "examples/logistic_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_quant8_1.model.cpp"
} // namespace logistic_quant8_1
TEST_F(NeuralnetworksHidlTest, logistic_quant8_1) {
generated_tests::Execute(device,
logistic_quant8_1::createTestModel,
logistic_quant8_1::is_ignored,
logistic_quant8_1::examples);
}

namespace logistic_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated logistic_quant8_2 test
#include "examples/logistic_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/logistic_quant8_2.model.cpp"
} // namespace logistic_quant8_2
TEST_F(NeuralnetworksHidlTest, logistic_quant8_2) {
generated_tests::Execute(device,
logistic_quant8_2::createTestModel,
logistic_quant8_2::is_ignored,
logistic_quant8_2::examples);
}
| |
// --- lsh_projection test ---------------------------------------------------
// Generated test unit: the namespace holds example data plus an included model
// constructor (createTestModel / is_ignored from vts_models/*.model.cpp); the
// TEST_F executes the model on the HIDL device and validates the outputs
// against the examples via generated_tests::Execute.
namespace lsh_projection {
std::vector<MixedTypedExample> examples = {
// Generated lsh_projection test
#include "examples/lsh_projection.example.cpp"
};
// Generated model constructor
#include "vts_models/lsh_projection.model.cpp"
} // namespace lsh_projection
TEST_F(NeuralnetworksHidlTest, lsh_projection) {
generated_tests::Execute(device,
lsh_projection::createTestModel,
lsh_projection::is_ignored,
lsh_projection::examples);
}
| |
// --- lstm tests ------------------------------------------------------------
// Generated test units: each namespace holds example data plus an included
// model constructor (createTestModel / is_ignored from vts_models/*.model.cpp);
// the matching TEST_F executes the model on the HIDL device and validates the
// outputs against the examples via generated_tests::Execute.
namespace lstm2 {
std::vector<MixedTypedExample> examples = {
// Generated lstm2 test
#include "examples/lstm2.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm2.model.cpp"
} // namespace lstm2
TEST_F(NeuralnetworksHidlTest, lstm2) {
generated_tests::Execute(device,
lstm2::createTestModel,
lstm2::is_ignored,
lstm2::examples);
}

namespace lstm3 {
std::vector<MixedTypedExample> examples = {
// Generated lstm3 test
#include "examples/lstm3.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm3.model.cpp"
} // namespace lstm3
TEST_F(NeuralnetworksHidlTest, lstm3) {
generated_tests::Execute(device,
lstm3::createTestModel,
lstm3::is_ignored,
lstm3::examples);
}

namespace lstm {
std::vector<MixedTypedExample> examples = {
// Generated lstm test
#include "examples/lstm.example.cpp"
};
// Generated model constructor
#include "vts_models/lstm.model.cpp"
} // namespace lstm
TEST_F(NeuralnetworksHidlTest, lstm) {
generated_tests::Execute(device,
lstm::createTestModel,
lstm::is_ignored,
lstm::examples);
}
| |
| namespace max_pool_float_1 { |
| std::vector<MixedTypedExample> examples = { |
| // Generated max_pool_float_1 test |
| #include "examples/max_pool_float_1.example.cpp" |
| }; |
| // Generated model constructor |
| #include "vts_models/max_pool_float_1.model.cpp" |
| } // namespace max_pool_float_1 |
| TEST_F(NeuralnetworksHidlTest, max_pool_float_1) { |
| generated_tests::Execute(device, |
| max_pool_float_1::createTestModel, |
| max_pool_float_1::is_ignored, |
| max_pool_float_1::examples); |
| } |
| |
| namespace max_pool_float_2 { |
| std::vector<MixedTypedExample> examples = { |
| // Generated max_pool_float_2 test |
| #include "examples/max_pool_float_2.example.cpp" |
| }; |
| // Generated model constructor |
| #include "vts_models/max_pool_float_2.model.cpp" |
| } // namespace max_pool_float_2 |
| TEST_F(NeuralnetworksHidlTest, max_pool_float_2) { |
| generated_tests::Execute(device, |
| max_pool_float_2::createTestModel, |
| max_pool_float_2::is_ignored, |
| max_pool_float_2::examples); |
| } |
| |
| namespace max_pool_float_3 { |
| std::vector<MixedTypedExample> examples = { |
| // Generated max_pool_float_3 test |
| #include "examples/max_pool_float_3.example.cpp" |
| }; |
| // Generated model constructor |
| #include "vts_models/max_pool_float_3.model.cpp" |
| } // namespace max_pool_float_3 |
| TEST_F(NeuralnetworksHidlTest, max_pool_float_3) { |
| generated_tests::Execute(device, |
| max_pool_float_3::createTestModel, |
| max_pool_float_3::is_ignored, |
| max_pool_float_3::examples); |
| } |
| |
| namespace max_pool_quant8_1 { |
| std::vector<MixedTypedExample> examples = { |
| // Generated max_pool_quant8_1 test |
| #include "examples/max_pool_quant8_1.example.cpp" |
| }; |
| // Generated model constructor |
| #include "vts_models/max_pool_quant8_1.model.cpp" |
| } // namespace max_pool_quant8_1 |
| TEST_F(NeuralnetworksHidlTest, max_pool_quant8_1) { |
| generated_tests::Execute(device, |
| max_pool_quant8_1::createTestModel, |
| max_pool_quant8_1::is_ignored, |
| max_pool_quant8_1::examples); |
| } |
| |
namespace max_pool_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_quant8_2 test
#include "examples/max_pool_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_quant8_2.model.cpp"
} // namespace max_pool_quant8_2
// Executes the generated max_pool_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, max_pool_quant8_2) {
    generated_tests::Execute(device,
                             max_pool_quant8_2::createTestModel,
                             max_pool_quant8_2::is_ignored,
                             max_pool_quant8_2::examples);
}
| |
namespace max_pool_quant8_3 {
std::vector<MixedTypedExample> examples = {
// Generated max_pool_quant8_3 test
#include "examples/max_pool_quant8_3.example.cpp"
};
// Generated model constructor
#include "vts_models/max_pool_quant8_3.model.cpp"
} // namespace max_pool_quant8_3
// Executes the generated max_pool_quant8_3 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, max_pool_quant8_3) {
    generated_tests::Execute(device,
                             max_pool_quant8_3::createTestModel,
                             max_pool_quant8_3::is_ignored,
                             max_pool_quant8_3::examples);
}
| |
namespace mul_broadcast_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated mul_broadcast_quant8 test
#include "examples/mul_broadcast_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_broadcast_quant8.model.cpp"
} // namespace mul_broadcast_quant8
// Executes the generated mul_broadcast_quant8 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, mul_broadcast_quant8) {
    generated_tests::Execute(device,
                             mul_broadcast_quant8::createTestModel,
                             mul_broadcast_quant8::is_ignored,
                             mul_broadcast_quant8::examples);
}
| |
namespace mul {
std::vector<MixedTypedExample> examples = {
// Generated mul test
#include "examples/mul.example.cpp"
};
// Generated model constructor
#include "vts_models/mul.model.cpp"
} // namespace mul
// Executes the generated mul model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, mul) {
    generated_tests::Execute(device,
                             mul::createTestModel,
                             mul::is_ignored,
                             mul::examples);
}
| |
namespace mul_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated mul_quant8 test
#include "examples/mul_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_quant8.model.cpp"
} // namespace mul_quant8
// Executes the generated mul_quant8 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, mul_quant8) {
    generated_tests::Execute(device,
                             mul_quant8::createTestModel,
                             mul_quant8::is_ignored,
                             mul_quant8::examples);
}
| |
namespace mul_relu {
std::vector<MixedTypedExample> examples = {
// Generated mul_relu test
#include "examples/mul_relu.example.cpp"
};
// Generated model constructor
#include "vts_models/mul_relu.model.cpp"
} // namespace mul_relu
// Executes the generated mul_relu model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, mul_relu) {
    generated_tests::Execute(device,
                             mul_relu::createTestModel,
                             mul_relu::is_ignored,
                             mul_relu::examples);
}
| |
namespace relu1_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_float_1 test
#include "examples/relu1_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_float_1.model.cpp"
} // namespace relu1_float_1
// Executes the generated relu1_float_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu1_float_1) {
    generated_tests::Execute(device,
                             relu1_float_1::createTestModel,
                             relu1_float_1::is_ignored,
                             relu1_float_1::examples);
}
| |
namespace relu1_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_float_2 test
#include "examples/relu1_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_float_2.model.cpp"
} // namespace relu1_float_2
// Executes the generated relu1_float_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu1_float_2) {
    generated_tests::Execute(device,
                             relu1_float_2::createTestModel,
                             relu1_float_2::is_ignored,
                             relu1_float_2::examples);
}
| |
namespace relu1_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_quant8_1 test
#include "examples/relu1_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_quant8_1.model.cpp"
} // namespace relu1_quant8_1
// Executes the generated relu1_quant8_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu1_quant8_1) {
    generated_tests::Execute(device,
                             relu1_quant8_1::createTestModel,
                             relu1_quant8_1::is_ignored,
                             relu1_quant8_1::examples);
}
| |
namespace relu1_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu1_quant8_2 test
#include "examples/relu1_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu1_quant8_2.model.cpp"
} // namespace relu1_quant8_2
// Executes the generated relu1_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu1_quant8_2) {
    generated_tests::Execute(device,
                             relu1_quant8_2::createTestModel,
                             relu1_quant8_2::is_ignored,
                             relu1_quant8_2::examples);
}
| |
namespace relu6_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_float_1 test
#include "examples/relu6_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_float_1.model.cpp"
} // namespace relu6_float_1
// Executes the generated relu6_float_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu6_float_1) {
    generated_tests::Execute(device,
                             relu6_float_1::createTestModel,
                             relu6_float_1::is_ignored,
                             relu6_float_1::examples);
}
| |
namespace relu6_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_float_2 test
#include "examples/relu6_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_float_2.model.cpp"
} // namespace relu6_float_2
// Executes the generated relu6_float_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu6_float_2) {
    generated_tests::Execute(device,
                             relu6_float_2::createTestModel,
                             relu6_float_2::is_ignored,
                             relu6_float_2::examples);
}
| |
namespace relu6_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_quant8_1 test
#include "examples/relu6_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_quant8_1.model.cpp"
} // namespace relu6_quant8_1
// Executes the generated relu6_quant8_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu6_quant8_1) {
    generated_tests::Execute(device,
                             relu6_quant8_1::createTestModel,
                             relu6_quant8_1::is_ignored,
                             relu6_quant8_1::examples);
}
| |
namespace relu6_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu6_quant8_2 test
#include "examples/relu6_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu6_quant8_2.model.cpp"
} // namespace relu6_quant8_2
// Executes the generated relu6_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu6_quant8_2) {
    generated_tests::Execute(device,
                             relu6_quant8_2::createTestModel,
                             relu6_quant8_2::is_ignored,
                             relu6_quant8_2::examples);
}
| |
namespace relu_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu_float_1 test
#include "examples/relu_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_float_1.model.cpp"
} // namespace relu_float_1
// Executes the generated relu_float_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu_float_1) {
    generated_tests::Execute(device,
                             relu_float_1::createTestModel,
                             relu_float_1::is_ignored,
                             relu_float_1::examples);
}
| |
namespace relu_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu_float_2 test
#include "examples/relu_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_float_2.model.cpp"
} // namespace relu_float_2
// Executes the generated relu_float_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu_float_2) {
    generated_tests::Execute(device,
                             relu_float_2::createTestModel,
                             relu_float_2::is_ignored,
                             relu_float_2::examples);
}
| |
namespace relu_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated relu_quant8_1 test
#include "examples/relu_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_quant8_1.model.cpp"
} // namespace relu_quant8_1
// Executes the generated relu_quant8_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu_quant8_1) {
    generated_tests::Execute(device,
                             relu_quant8_1::createTestModel,
                             relu_quant8_1::is_ignored,
                             relu_quant8_1::examples);
}
| |
namespace relu_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated relu_quant8_2 test
#include "examples/relu_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/relu_quant8_2.model.cpp"
} // namespace relu_quant8_2
// Executes the generated relu_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, relu_quant8_2) {
    generated_tests::Execute(device,
                             relu_quant8_2::createTestModel,
                             relu_quant8_2::is_ignored,
                             relu_quant8_2::examples);
}
| |
namespace reshape {
std::vector<MixedTypedExample> examples = {
// Generated reshape test
#include "examples/reshape.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape.model.cpp"
} // namespace reshape
// Executes the generated reshape model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, reshape) {
    generated_tests::Execute(device,
                             reshape::createTestModel,
                             reshape::is_ignored,
                             reshape::examples);
}
| |
namespace reshape_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated reshape_quant8 test
#include "examples/reshape_quant8.example.cpp"
};
// Generated model constructor
#include "vts_models/reshape_quant8.model.cpp"
} // namespace reshape_quant8
// Executes the generated reshape_quant8 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, reshape_quant8) {
    generated_tests::Execute(device,
                             reshape_quant8::createTestModel,
                             reshape_quant8::is_ignored,
                             reshape_quant8::examples);
}
| |
namespace resize_bilinear {
std::vector<MixedTypedExample> examples = {
// Generated resize_bilinear test
#include "examples/resize_bilinear.example.cpp"
};
// Generated model constructor
#include "vts_models/resize_bilinear.model.cpp"
} // namespace resize_bilinear
// Executes the generated resize_bilinear model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, resize_bilinear) {
    generated_tests::Execute(device,
                             resize_bilinear::createTestModel,
                             resize_bilinear::is_ignored,
                             resize_bilinear::examples);
}
| |
namespace rnn {
std::vector<MixedTypedExample> examples = {
// Generated rnn test
#include "examples/rnn.example.cpp"
};
// Generated model constructor
#include "vts_models/rnn.model.cpp"
} // namespace rnn
// Executes the generated rnn model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, rnn) {
    generated_tests::Execute(device,
                             rnn::createTestModel,
                             rnn::is_ignored,
                             rnn::examples);
}
| |
namespace softmax_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_float_1 test
#include "examples/softmax_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_float_1.model.cpp"
} // namespace softmax_float_1
// Executes the generated softmax_float_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, softmax_float_1) {
    generated_tests::Execute(device,
                             softmax_float_1::createTestModel,
                             softmax_float_1::is_ignored,
                             softmax_float_1::examples);
}
| |
namespace softmax_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_float_2 test
#include "examples/softmax_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_float_2.model.cpp"
} // namespace softmax_float_2
// Executes the generated softmax_float_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, softmax_float_2) {
    generated_tests::Execute(device,
                             softmax_float_2::createTestModel,
                             softmax_float_2::is_ignored,
                             softmax_float_2::examples);
}
| |
namespace softmax_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_quant8_1 test
#include "examples/softmax_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_quant8_1.model.cpp"
} // namespace softmax_quant8_1
// Executes the generated softmax_quant8_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, softmax_quant8_1) {
    generated_tests::Execute(device,
                             softmax_quant8_1::createTestModel,
                             softmax_quant8_1::is_ignored,
                             softmax_quant8_1::examples);
}
| |
namespace softmax_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated softmax_quant8_2 test
#include "examples/softmax_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/softmax_quant8_2.model.cpp"
} // namespace softmax_quant8_2
// Executes the generated softmax_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, softmax_quant8_2) {
    generated_tests::Execute(device,
                             softmax_quant8_2::createTestModel,
                             softmax_quant8_2::is_ignored,
                             softmax_quant8_2::examples);
}
| |
namespace space_to_depth_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_float_1 test
#include "examples/space_to_depth_float_1.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_float_1.model.cpp"
} // namespace space_to_depth_float_1
// Executes the generated space_to_depth_float_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, space_to_depth_float_1) {
    generated_tests::Execute(device,
                             space_to_depth_float_1::createTestModel,
                             space_to_depth_float_1::is_ignored,
                             space_to_depth_float_1::examples);
}
| |
namespace space_to_depth_float_2 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_float_2 test
#include "examples/space_to_depth_float_2.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_float_2.model.cpp"
} // namespace space_to_depth_float_2
// Executes the generated space_to_depth_float_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, space_to_depth_float_2) {
    generated_tests::Execute(device,
                             space_to_depth_float_2::createTestModel,
                             space_to_depth_float_2::is_ignored,
                             space_to_depth_float_2::examples);
}
| |
namespace space_to_depth_quant8_1 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_quant8_1 test
#include "examples/space_to_depth_quant8_1.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_quant8_1.model.cpp"
} // namespace space_to_depth_quant8_1
// Executes the generated space_to_depth_quant8_1 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, space_to_depth_quant8_1) {
    generated_tests::Execute(device,
                             space_to_depth_quant8_1::createTestModel,
                             space_to_depth_quant8_1::is_ignored,
                             space_to_depth_quant8_1::examples);
}
| |
namespace space_to_depth_quant8_2 {
std::vector<MixedTypedExample> examples = {
// Generated space_to_depth_quant8_2 test
#include "examples/space_to_depth_quant8_2.example.cpp"
};
// Generated model constructor
#include "vts_models/space_to_depth_quant8_2.model.cpp"
} // namespace space_to_depth_quant8_2
// Executes the generated space_to_depth_quant8_2 model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, space_to_depth_quant8_2) {
    generated_tests::Execute(device,
                             space_to_depth_quant8_2::createTestModel,
                             space_to_depth_quant8_2::is_ignored,
                             space_to_depth_quant8_2::examples);
}
| |
namespace svdf {
std::vector<MixedTypedExample> examples = {
// Generated svdf test
#include "examples/svdf.example.cpp"
};
// Generated model constructor
#include "vts_models/svdf.model.cpp"
} // namespace svdf
// Executes the generated svdf model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, svdf) {
    generated_tests::Execute(device,
                             svdf::createTestModel,
                             svdf::is_ignored,
                             svdf::examples);
}
| |
namespace tanh {
std::vector<MixedTypedExample> examples = {
// Generated tanh test
#include "examples/tanh.example.cpp"
};
// Generated model constructor
#include "vts_models/tanh.model.cpp"
} // namespace tanh
// Executes the generated tanh model on `device` for every example above.
TEST_F(NeuralnetworksHidlTest, tanh) {
    generated_tests::Execute(device,
                             tanh::createTestModel,
                             tanh::is_ignored,
                             tanh::examples);
}