Merge "Enable generated validation tests."
diff --git a/nn/runtime/test/TestGenerated.cpp b/nn/runtime/test/TestGenerated.cpp
index d259bb6..5f663e3 100644
--- a/nn/runtime/test/TestGenerated.cpp
+++ b/nn/runtime/test/TestGenerated.cpp
@@ -83,21 +83,29 @@
}
} // namespace
-Compilation GeneratedTests::compileModel(const Model* model) {
+std::optional<Compilation> GeneratedTests::compileModel(const Model* model) {
NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
if (mTestCompilationCaching) {
// Compile the model twice with the same token, so that compilation caching will be
// exercised if supported by the driver.
+ // No invalid model will be passed to this branch.
+ EXPECT_FALSE(mExpectFailure);
Compilation compilation1(model);
- compilation1.setCaching(mCacheDir, mToken);
- compilation1.finish();
+ EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR);
+ EXPECT_EQ(compilation1.finish(), Result::NO_ERROR);
Compilation compilation2(model);
- compilation2.setCaching(mCacheDir, mToken);
- compilation2.finish();
+ EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR);
+ EXPECT_EQ(compilation2.finish(), Result::NO_ERROR);
return compilation2;
} else {
Compilation compilation(model);
- compilation.finish();
+ Result result = compilation.finish();
+
+ // For valid model, we check the compilation result == NO_ERROR.
+ // For invalid model, the driver may fail at compilation or execution, so any result code is
+ // permitted at this point.
+ if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt;
+ EXPECT_EQ(result, Result::NO_ERROR);
return compilation;
}
}
@@ -151,8 +159,14 @@
});
}
- Result r = execution.compute();
- ASSERT_EQ(Result::NO_ERROR, r);
+ Result result = execution.compute();
+ if (mExpectFailure) {
+ ASSERT_NE(result, Result::NO_ERROR);
+ continue;
+ } else {
+ ASSERT_EQ(result, Result::NO_ERROR);
+ }
+
{
NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
@@ -188,8 +202,10 @@
void GeneratedTests::executeOnce(const Model* model, std::function<bool(int)> isIgnored,
std::vector<MixedTypedExample>& examples, std::string dumpFile) {
NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
- Compilation compilation = compileModel(model);
- executeWithCompilation(model, &compilation, isIgnored, examples, dumpFile);
+ std::optional<Compilation> compilation = compileModel(model);
+ // Early return if compilation fails. The compilation result code is checked in compileModel.
+ if (!compilation) return;
+ executeWithCompilation(model, &compilation.value(), isIgnored, examples, dumpFile);
}
void GeneratedTests::executeMultithreadedOwnCompilation(const Model* model,
@@ -209,11 +225,14 @@
std::vector<MixedTypedExample>& examples) {
NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
SCOPED_TRACE("MultithreadedSharedCompilation");
- Compilation compilation = compileModel(model);
+ std::optional<Compilation> compilation = compileModel(model);
+    // Early return if compilation fails. The compilation result code is checked in compileModel.
+ if (!compilation) return;
std::vector<std::thread> threads;
for (int i = 0; i < 10; i++) {
- threads.push_back(std::thread(
- [&]() { executeWithCompilation(model, &compilation, isIgnored, examples, ""); }));
+ threads.push_back(std::thread([&]() {
+ executeWithCompilation(model, &compilation.value(), isIgnored, examples, "");
+ }));
}
std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
}
@@ -239,8 +258,10 @@
};
mTestCompilationCaching = false;
executeInternal(dumpFile);
- mTestCompilationCaching = true;
- executeInternal("");
+ if (!mExpectFailure) {
+ mTestCompilationCaching = true;
+ executeInternal("");
+ }
}
void GeneratedTests::SetUp() {
diff --git a/nn/runtime/test/TestGenerated.h b/nn/runtime/test/TestGenerated.h
index 2965d46..819c351 100644
--- a/nn/runtime/test/TestGenerated.h
+++ b/nn/runtime/test/TestGenerated.h
@@ -19,6 +19,8 @@
#include <gtest/gtest.h>
+#include <optional>
+
#include "TestCompliance.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
@@ -59,10 +61,12 @@
class GeneratedTests : public GENERATED_TESTS_BASE {
protected:
+ GeneratedTests(bool expectFailure = false) : mExpectFailure(expectFailure) {}
+
virtual void SetUp() override;
virtual void TearDown() override;
- Compilation compileModel(const Model* model);
+ std::optional<Compilation> compileModel(const Model* model);
void executeWithCompilation(const Model* model, Compilation* compilation,
std::function<bool(int)> isIgnored,
std::vector<MixedTypedExample>& examples, std::string dumpFile);
@@ -79,7 +83,8 @@
std::string mCacheDir;
std::vector<uint8_t> mToken;
- bool mTestCompilationCaching;
+ bool mTestCompilationCaching = false;
+ bool mExpectFailure = false;
#ifdef NNTEST_COMPUTE_MODE
// SetUp() uses Execution::setComputeMode() to establish a new ComputeMode,
// and saves off the previous ComputeMode here; TearDown() restores that
@@ -92,6 +97,12 @@
// Tag for the dynamic output shape tests
class DynamicOutputShapeTest : public GeneratedTests {};
+// Tag for the generated validation tests
+class GeneratedValidationTests : public GeneratedTests {
+ protected:
+ GeneratedValidationTests() : GeneratedTests(/*expectFailure=*/true) {}
+};
+
} // namespace generated_tests
using namespace generated_tests;
diff --git a/nn/runtime/test/generated/examples/strided_slice_invalid_output_dims.example.cpp b/nn/runtime/test/generated/examples/strided_slice_invalid_output_dims.example.cpp
new file mode 100644
index 0000000..0640833
--- /dev/null
+++ b/nn/runtime/test/generated/examples/strided_slice_invalid_output_dims.example.cpp
@@ -0,0 +1,116 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+std::vector<MixedTypedExample>& get_examples() {
+static std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {3}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples;
+};
+
+std::vector<MixedTypedExample>& get_examples_dynamic_output_shape() {
+static std::vector<MixedTypedExample> examples_dynamic_output_shape = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {2, 3}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> Dimensions map
+ .operandDimensions = {{0, {3}}},
+ // int -> FLOAT32 map
+ .float32Operands = {{0, {1.0f, 2.0f, 3.0f}}},
+ // int -> INT32 map
+ .int32Operands = {},
+ // int -> QUANT8_ASYMM map
+ .quant8AsymmOperands = {},
+ // int -> QUANT16_SYMM map
+ .quant16SymmOperands = {},
+ // int -> FLOAT16 map
+ .float16Operands = {},
+ // int -> BOOL8 map
+ .bool8Operands = {},
+ // int -> QUANT8_SYMM_PER_CHANNEL map
+ .quant8ChannelOperands = {},
+ // int -> QUANT16_ASYMM map
+ .quant16AsymmOperands = {},
+ // int -> QUANT8_SYMM map
+ .quant8SymmOperands = {},
+}
+},
+}, // End of an example
+};
+return examples_dynamic_output_shape;
+};
+
diff --git a/nn/runtime/test/generated/models/strided_slice_invalid_output_dims.model.cpp b/nn/runtime/test/generated/models/strided_slice_invalid_output_dims.model.cpp
new file mode 100644
index 0000000..c8aedeb
--- /dev/null
+++ b/nn/runtime/test/generated/models/strided_slice_invalid_output_dims.model.cpp
@@ -0,0 +1,82 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {3});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto begins = model->addOperand(&type1);
+ auto ends = model->addOperand(&type1);
+ auto strides = model->addOperand(&type1);
+ auto beginMask = model->addOperand(&type2);
+ auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t begins_init[] = {0, 0};
+ model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+ static int32_t ends_init[] = {2, 3};
+ model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+ static int32_t strides_init[] = {1, 1};
+ model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+ static int32_t beginMask_init[] = {0};
+ model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+ static int32_t endMask_init[] = {0};
+ model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+ static int32_t shrinkAxisMask_init[] = {1};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_dynamic_output_shape(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type2(Type::INT32, {});
+ OperandType type4(Type::TENSOR_FLOAT32, {0});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto begins = model->addOperand(&type1);
+ auto ends = model->addOperand(&type1);
+ auto strides = model->addOperand(&type1);
+ auto beginMask = model->addOperand(&type2);
+ auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
+ auto output = model->addOperand(&type4);
+ // Phase 2, operations
+ static int32_t begins_init[] = {0, 0};
+ model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+ static int32_t ends_init[] = {2, 3};
+ model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+ static int32_t strides_init[] = {1, 1};
+ model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+ static int32_t beginMask_init[] = {0};
+ model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+ static int32_t endMask_init[] = {0};
+ model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+ static int32_t shrinkAxisMask_init[] = {1};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/generated/tests/strided_slice_invalid_output_dims.mod.py.cpp b/nn/runtime/test/generated/tests/strided_slice_invalid_output_dims.mod.py.cpp
new file mode 100644
index 0000000..7af9af4
--- /dev/null
+++ b/nn/runtime/test/generated/tests/strided_slice_invalid_output_dims.mod.py.cpp
@@ -0,0 +1,23 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace strided_slice_invalid_output_dims {
+// Generated strided_slice_invalid_output_dims test
+#include "generated/examples/strided_slice_invalid_output_dims.example.cpp"
+// Generated model constructor
+#include "generated/models/strided_slice_invalid_output_dims.model.cpp"
+} // namespace strided_slice_invalid_output_dims
+
+TEST_F(GeneratedValidationTests, strided_slice_invalid_output_dims) {
+ execute(strided_slice_invalid_output_dims::CreateModel,
+ strided_slice_invalid_output_dims::is_ignored,
+ strided_slice_invalid_output_dims::get_examples());
+}
+
+TEST_F(GeneratedValidationTests, strided_slice_invalid_output_dims_dynamic_output_shape) {
+ execute(strided_slice_invalid_output_dims::CreateModel_dynamic_output_shape,
+ strided_slice_invalid_output_dims::is_ignored_dynamic_output_shape,
+ strided_slice_invalid_output_dims::get_examples_dynamic_output_shape());
+}
+
diff --git a/nn/runtime/test/generated/vts/V1_2/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/vts/V1_2/all_generated_V1_2_vts_tests.cpp
index 3a9fe0d..6256de4 100644
--- a/nn/runtime/test/generated/vts/V1_2/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/vts/V1_2/all_generated_V1_2_vts_tests.cpp
@@ -69061,6 +69061,28 @@
}
+// Generated from: strided_slice_invalid_output_dims.mod.py.
+namespace strided_slice_invalid_output_dims {
+// Generated strided_slice_invalid_output_dims test
+#include "examples/strided_slice_invalid_output_dims.example.cpp"
+// Generated model constructor
+#include "vts/V1_2/models/strided_slice_invalid_output_dims.model.cpp"
+} // namespace strided_slice_invalid_output_dims
+
+TEST_F(ValidationTest, strided_slice_invalid_output_dims) {
+ const Model model = strided_slice_invalid_output_dims::createTestModel();
+ const std::vector<Request> requests = createRequests(strided_slice_invalid_output_dims::get_examples());
+ validateFailure(model, requests);
+}
+
+
+TEST_F(ValidationTest, strided_slice_invalid_output_dims_dynamic_output_shape) {
+ const Model model = strided_slice_invalid_output_dims::createTestModel_dynamic_output_shape();
+ const std::vector<Request> requests = createRequests(strided_slice_invalid_output_dims::get_examples_dynamic_output_shape());
+ validateFailure(model, requests);
+}
+
+
// Generated from: sub_quantized_different_scales.mod.py.
namespace sub_quantized_different_scales {
// Generated sub_quantized_different_scales test
diff --git a/nn/runtime/test/generated/vts/V1_2/models/strided_slice_invalid_output_dims.model.cpp b/nn/runtime/test/generated/vts/V1_2/models/strided_slice_invalid_output_dims.model.cpp
new file mode 100644
index 0000000..106655a
--- /dev/null
+++ b/nn/runtime/test/generated/vts/V1_2/models/strided_slice_invalid_output_dims.model.cpp
@@ -0,0 +1,216 @@
+// clang-format off
+// Generated file (from: strided_slice_invalid_output_dims.mod.py). Do not edit
+// Create the model
+Model createTestModel() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 28, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {3},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::STRIDED_SLICE,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {7};
+ std::vector<uint8_t> operandValues = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_dynamic_output_shape() {
+ const std::vector<Operand> operands = {
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {2, 3},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 0, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 8, .length = 8},
+ },
+ {
+ .type = OperandType::TENSOR_INT32,
+ .dimensions = {2},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 16, .length = 8},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 24, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 28, .length = 4},
+ },
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 1,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::CONSTANT_COPY,
+ .location = {.poolIndex = 0, .offset = 32, .length = 4},
+ },
+ {
+ .type = OperandType::TENSOR_FLOAT32,
+ .dimensions = {0},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_OUTPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ }
+ };
+
+ const std::vector<Operation> operations = {
+ {
+ .type = OperationType::STRIDED_SLICE,
+ .inputs = {0, 1, 2, 3, 4, 5, 6},
+ .outputs = {7},
+ }
+ };
+
+ const std::vector<uint32_t> inputIndexes = {0};
+ const std::vector<uint32_t> outputIndexes = {7};
+ std::vector<uint8_t> operandValues = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
+ };
+ const std::vector<hidl_memory> pools = {};
+
+ return {
+ .operands = operands,
+ .operations = operations,
+ .inputIndexes = inputIndexes,
+ .outputIndexes = outputIndexes,
+ .operandValues = operandValues,
+ .pools = pools,
+ };
+}
+
+inline bool is_ignored_dynamic_output_shape(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/specs/V1_2/strided_slice_invalid_output_dims.mod.py b/nn/runtime/test/specs/V1_2/strided_slice_invalid_output_dims.mod.py
new file mode 100644
index 0000000..76d2179
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/strided_slice_invalid_output_dims.mod.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TODO: Move this spec to V1_3 directory?
+#
+# This test makes sure that executing STRIDED_SLICE results in a failure when
+# the output dimensions do not match shrinkAxisMask.
+#
+# Based on strided_slice_float_11.mod.py.
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+# The value "2" below makes the test invalid. See http://b/79856511#comment2.
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_FLOAT32", "{3}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+Example({
+ i1: [1, 2, 3, 4, 5, 6],
+ output: [1, 2, 3],
+}).ExpectFailure()
diff --git a/nn/tools/test_generator/README.md b/nn/tools/test_generator/README.md
index 4c6ff51..18dd4c4 100644
--- a/nn/tools/test_generator/README.md
+++ b/nn/tools/test_generator/README.md
@@ -336,6 +336,14 @@
This is useful when only a subset of variations has a different version.
+### Creating negative tests
+
+A negative test, also known as a validation test, is a testing method that supplies an invalid model or request, and expects the target framework or driver to fail gracefully. You can use `ExpectFailure` to tag an example as invalid.
+
+```Python
+Example.ExpectFailure()
+```
+
### A Complete Example
```Python
diff --git a/nn/tools/test_generator/cts_generator.py b/nn/tools/test_generator/cts_generator.py
index df2d390..ba0d50e 100755
--- a/nn/tools/test_generator/cts_generator.py
+++ b/nn/tools/test_generator/cts_generator.py
@@ -270,12 +270,19 @@
execute({namespace}::{create_model_name},
{namespace}::{is_ignored_name},
{namespace}::get_{examples_name}(){log_file});\n}}\n"""
- if example.model.version is not None:
+ if example.model.version is not None and not example.expectFailure:
testTemplate += """\
TEST_AVAILABLE_SINCE({version}, {test_name}, {namespace}::{create_model_name})\n"""
+
+ if example.expectFailure:
+ testCaseName = "GeneratedValidationTests"
+ elif example.model.hasDynamicOutputShape:
+ testCaseName = "DynamicOutputShapeTest"
+ else:
+ testCaseName = "GeneratedTests"
+
print(testTemplate.format(
- test_case_name="DynamicOutputShapeTest" if example.model.hasDynamicOutputShape \
- else "GeneratedTests",
+ test_case_name=testCaseName,
test_name=str(example.testName),
namespace=tg.FileNames.specName,
create_model_name=str(example.model.createFunctionName),
diff --git a/nn/tools/test_generator/test_generator.py b/nn/tools/test_generator/test_generator.py
index 59fd080..a3f98ea 100755
--- a/nn/tools/test_generator/test_generator.py
+++ b/nn/tools/test_generator/test_generator.py
@@ -978,6 +978,7 @@
self.model = Model.models[-1] if model is None else model
self.name = name
self.expectedMultinomialDistributionTolerance = None
+ self.expectFailure = False
self.feedDicts = []
for feedDict in args:
if type(feedDict) is tuple or type(feedDict) is list:
@@ -1159,9 +1160,16 @@
# If set to greater than zero, the input is compared as log-probabilities
# to the output and must be within this tolerance to pass.
def WithMultinomialDistributionTolerance(self, expectedTolerance):
+ assert self.expectFailure is False
self.expectedMultinomialDistributionTolerance = expectedTolerance
return self
+ # Specifies that this example is expected to fail during compilation or execution.
+ def ExpectFailure(self):
+ assert self.expectedMultinomialDistributionTolerance is None
+ self.expectFailure = True
+ return self
+
# For backward-compatibility with slicing.py
# Similar to dump_dict, but in python. Used by the slicing tool
# if referenced is not None, only print operands that are present there
diff --git a/nn/tools/test_generator/vts_generator.py b/nn/tools/test_generator/vts_generator.py
index a222ec8..fd93520 100755
--- a/nn/tools/test_generator/vts_generator.py
+++ b/nn/tools/test_generator/vts_generator.py
@@ -287,19 +287,24 @@
if example.model.hasDynamicOutputShape and target_hal_version < "V1_2":
return
- testTemplate = """\
+ generatedTestTemplate = """\
TEST_F({test_case_name}, {test_name}) {{
generated_tests::Execute(device,
{namespace}::{create_model_name},
{namespace}::{is_ignored_name},
- {namespace}::get_{examples_name}(){test_dynamic_output_shape});\n}}
-
+ {namespace}::get_{examples_name}(){test_dynamic_output_shape});\n}}\n
+"""
+ validationTestTemplate = """\
TEST_F(ValidationTest, {test_name}) {{
const Model model = {namespace}::{create_model_name}();
const std::vector<Request> requests = createRequests({namespace}::get_{examples_name}());
- validateEverything(model, requests);
+ {validation_method}(model, requests);
}}\n
"""
+
+ testTemplate = validationTestTemplate if example.expectFailure \
+ else generatedTestTemplate + validationTestTemplate
+
print(testTemplate.format(
test_case_name="DynamicOutputShapeTest" if example.model.hasDynamicOutputShape \
else "NeuralnetworksHidlTest",
@@ -308,7 +313,8 @@
create_model_name=str(example.model.createTestFunctionName),
is_ignored_name=str(example.model.isIgnoredFunctionName),
examples_name=str(example.examplesName),
- test_dynamic_output_shape=", true" if example.model.hasDynamicOutputShape else ""
+ test_dynamic_output_shape=", true" if example.model.hasDynamicOutputShape else "",
+ validation_method="validateFailure" if example.expectFailure else "validateEverything"
), file=test_fd)
def InitializeFiles(model_fd, example_fd, test_fd):