Add NCHW data layout support to BATCH_TO_SPACE_ND.

Add an optional data_layout parameter to the BATCH_TO_SPACE_ND op to
support the NCHW data layout. Add test cases and generate the tests.
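
As a rough illustration, a client passes the new operand right after the
block-size tensor. A minimal sketch, assuming typical NNAPI 1.2 C API usage
(the operand indices 0-3 and the useNchw name are hypothetical, not part of
this change):

    #include <android/NeuralNetworks.h>

    // ... after adding the input tensor (index 0) and block sizes (index 1):
    ANeuralNetworksOperandType layoutType = {
            .type = ANEURALNETWORKS_BOOL, .dimensionCount = 0,
            .dimensions = nullptr, .scale = 0.0f, .zeroPoint = 0};
    ANeuralNetworksModel_addOperand(model, &layoutType);  // becomes index 2
    const bool useNchw = true;  // true selects NCHW; omit the operand for NHWC
    ANeuralNetworksModel_setOperandValue(model, 2, &useNchw, sizeof(useNchw));
    const uint32_t inputs[] = {0, 1, 2};  // data, block sizes, layout
    const uint32_t outputs[] = {3};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_BATCH_TO_SPACE_ND,
                                      3, inputs, 1, outputs);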

Provide a reference CPU implementation for NCHW support. The native
layout of the CPU implementation is still NHWC; appropriate transpositions
are applied for NCHW inputs and outputs, as sketched below.
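
A minimal sketch of that transposition (illustrative only; the change itself
routes through the convertToNhwc/convertFromNhwc helpers in CpuExecutor.cpp,
and this standalone function assumes dense row-major float buffers):

    #include <cstdint>

    static void transposeNchwToNhwc(const float* in, float* out, uint32_t n,
                                    uint32_t c, uint32_t h, uint32_t w) {
        // NCHW element index: ((b * c + k) * h + y) * w + x
        // NHWC element index: ((b * h + y) * w + x) * c + k
        for (uint32_t b = 0; b < n; ++b)
            for (uint32_t y = 0; y < h; ++y)
                for (uint32_t x = 0; x < w; ++x)
                    for (uint32_t k = 0; k < c; ++k)
                        out[((b * h + y) * w + x) * c + k] =
                                in[((b * c + k) * h + y) * w + x];
    }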

Bug: 112320705

Test: NeuralNetworksTest_static
Change-Id: Ie4aa8c7cc949d4b673daebf635fdd2149fcd4a1c
Merged-In: Ie4aa8c7cc949d4b673daebf635fdd2149fcd4a1c
(cherry picked from commit 8acaf87a6b1121425f011f4a308f3923590dfbe8)
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp
index fd4317e..fddbb93 100644
--- a/common/CpuExecutor.cpp
+++ b/common/CpuExecutor.cpp
@@ -1504,25 +1504,41 @@
                 svdf.Eval();
         } break;
         case OperationType::BATCH_TO_SPACE_ND: {
-            if (!allParametersPresent(2, 1)) {
+            const size_t inCount = ins.size();
+            if ((inCount != 3 && inCount != 2) || !allParametersPresent(inCount, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input = mOperands[ins[0]];
             const RunTimeOperandInfo& blockSize = mOperands[ins[1]];
+            bool data_layout = inCount == 3 ? getScalarData<bool>(mOperands[ins[2]]) : false;
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
-            success = batchToSpacePrepare(input.shape(),
+            RunTimeOperandInfo input_tmp, output_tmp;
+            std::unique_ptr<uint8_t[]> input_tmp_guard, output_tmp_guard;
+            if (!convertToNhwc(input_tmp, input, input_tmp_guard, data_layout)) {
+                success = false;
+                break;
+            }
+            output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE;
+            output_tmp.buffer = data_layout ? nullptr : output.buffer;
+
+            success = batchToSpacePrepare(input_tmp.shape(),
                                           reinterpret_cast<const int32_t*>(blockSize.buffer),
-                                          blockSize.shape(),
-                                          &outShape) &&
-                      setInfoAndAllocateIfNeeded(&output, outShape) &&
-                      batchToSpaceGeneric(input.buffer,
-                                          input.shape(),
+                                          blockSize.shape(), &outShape) &&
+                      setInfoAndAllocateIfNeeded(&output_tmp, outShape) &&
+                      batchToSpaceGeneric(input_tmp.buffer, input_tmp.shape(),
                                           reinterpret_cast<const int32_t*>(blockSize.buffer),
-                                          output.buffer,
-                                          outShape);
+                                          output_tmp.buffer, outShape);
+
+            if (data_layout) {
+                output_tmp_guard.reset(output_tmp.buffer);
+            }
+            if (!success || !convertFromNhwc(output, output_tmp, data_layout)) {
+                success = false;
+                break;
+            }
         } break;
         case OperationType::SPACE_TO_BATCH_ND: {
             if (!allParametersPresent(3, 1)) {
diff --git a/common/Utils.cpp b/common/Utils.cpp
index 69375f2..e38a2b3 100644
--- a/common/Utils.cpp
+++ b/common/Utils.cpp
@@ -1298,8 +1298,10 @@
                                                  outExpectedTypes);
         }
         case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
-            if (inputCount != 2 || outputCount != 1) {
-                logInvalidInOutNumber(2, 1);
+            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 3 or 2) or output operands (" << outputCount
+                           << ", expected 1) for operation " << kOperationNames[opType];
                 return ANEURALNETWORKS_BAD_DATA;
             }
             auto inputType = operands[inputIndexes[0]].type;
@@ -1318,6 +1320,9 @@
                            << kOperationNames[opType];
                 return ANEURALNETWORKS_BAD_DATA;
             }
+            if (inputCount == 3) {
+                inExpectedTypes.push_back(OperandType::BOOL);
+            }
             return validateOperationOperandTypes(operands,
                                                  inputCount, inputIndexes,
                                                  inExpectedTypes,
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h
index 95eabc0..8166826 100644
--- a/runtime/include/NeuralNetworks.h
+++ b/runtime/include/NeuralNetworks.h
@@ -1561,13 +1561,18 @@
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
      *
-     * Supported tensor rank: 4
+     * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+     * With the default data layout NHWC, the data is stored in the order of:
+     * [batch, height, width, channels]. Alternatively, the data layout can
+     * be NCHW, where the data is stored as: [batch, channels, height, width].
      *
      * Inputs:
      * * 0: An n-D tensor, specifying the tensor to be reshaped
      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
      *      sizes for each spatial dimension of the input tensor. All values
      *      must be >= 1.
+     * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, defaulting to false.
+     *      Set to true to specify NCHW data layout for input0 and output0.
      *
      * Outputs:
      * * 0: A tensor of the same {@link OperandCode} as input0.
diff --git a/runtime/test/for-cts/TestGeneratedOneFile.cpp b/runtime/test/for-cts/TestGeneratedOneFile.cpp
index 2548b6c..2ba3051 100644
--- a/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -344,6 +344,7 @@
 #include "../generated/tests/argmin_2_quant8.mod.py.cpp"
 #include "../generated/tests/argmin_3_float.mod.py.cpp"
 #include "../generated/tests/avg_pool_v1_2.mod.py.cpp"
+#include "../generated/tests/batch_to_space_v1_2.mod.py.cpp"
 #include "../generated/tests/channel_shuffle.mod.py.cpp"
 #include "../generated/tests/conv2d_v1_2.mod.py.cpp"
 #include "../generated/tests/depth_to_space_v1_2.mod.py.cpp"
diff --git a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 47a7b90..f0db69a 100644
--- a/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -489,6 +489,98 @@
                            avg_pool_v1_2::examples_nchw_quant8_5);
 }
 
+// Generated from: batch_to_space_v1_2.mod.py.
+namespace batch_to_space_v1_2 {
+// Generated batch_to_space_v1_2 test
+#include "examples/batch_to_space_v1_2.example.cpp"
+// Generated model constructor
+#include "vts_models/batch_to_space_v1_2.model.cpp"
+} // namespace batch_to_space_v1_2
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc,
+                           batch_to_space_v1_2::is_ignored_nhwc,
+                           batch_to_space_v1_2::examples_nhwc);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_relaxed) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_relaxed,
+                           batch_to_space_v1_2::is_ignored_nhwc_relaxed,
+                           batch_to_space_v1_2::examples_nhwc_relaxed);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_quant8) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_quant8,
+                           batch_to_space_v1_2::is_ignored_nhwc_quant8,
+                           batch_to_space_v1_2::examples_nhwc_quant8);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw,
+                           batch_to_space_v1_2::is_ignored_nchw,
+                           batch_to_space_v1_2::examples_nchw);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_relaxed) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_relaxed,
+                           batch_to_space_v1_2::is_ignored_nchw_relaxed,
+                           batch_to_space_v1_2::examples_nchw_relaxed);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_quant8) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_quant8,
+                           batch_to_space_v1_2::is_ignored_nchw_quant8,
+                           batch_to_space_v1_2::examples_nchw_quant8);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_2,
+                           batch_to_space_v1_2::is_ignored_nhwc_2,
+                           batch_to_space_v1_2::examples_nhwc_2);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_relaxed_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_relaxed_2,
+                           batch_to_space_v1_2::is_ignored_nhwc_relaxed_2,
+                           batch_to_space_v1_2::examples_nhwc_relaxed_2);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nhwc_quant8_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nhwc_quant8_2,
+                           batch_to_space_v1_2::is_ignored_nhwc_quant8_2,
+                           batch_to_space_v1_2::examples_nhwc_quant8_2);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_2,
+                           batch_to_space_v1_2::is_ignored_nchw_2,
+                           batch_to_space_v1_2::examples_nchw_2);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_relaxed_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_relaxed_2,
+                           batch_to_space_v1_2::is_ignored_nchw_relaxed_2,
+                           batch_to_space_v1_2::examples_nchw_relaxed_2);
+}
+
+TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_quant8_2) {
+  generated_tests::Execute(device,
+                           batch_to_space_v1_2::createTestModel_nchw_quant8_2,
+                           batch_to_space_v1_2::is_ignored_nchw_quant8_2,
+                           batch_to_space_v1_2::examples_nchw_quant8_2);
+}
+
 // Generated from: channel_shuffle.mod.py.
 namespace channel_shuffle {
 // Generated channel_shuffle test
diff --git a/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp b/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp
new file mode 100644
index 0000000..ad758ba
--- /dev/null
+++ b/runtime/test/generated/examples/batch_to_space_v1_2.example.cpp
@@ -0,0 +1,290 @@
+// clang-format off
+// Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
+std::vector<MixedTypedExample> examples_nhwc = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nhwc_relaxed = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nhwc_quant8 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {14, 23, 32, 41, 54, 63, 72, 81}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {14, 23, 32, 41, 54, 63, 72, 81}}}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 3.2f, 5.4f, 7.2f, 2.3f, 4.1f, 6.3f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw_relaxed = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 3.2f, 5.4f, 7.2f, 2.3f, 4.1f, 6.3f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw_quant8 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {14, 23, 32, 41, 54, 63, 72, 81}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {14, 32, 54, 72, 23, 41, 63, 81}}}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nhwc_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nhwc_relaxed_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nhwc_quant8_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {130, 138, 132, 140, 146, 154, 148, 156, 134, 142, 136, 144, 150, 158, 152, 160}}}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw_relaxed_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 5.0f, 2.0f, 6.0f, 9.0f, 13.0f, 10.0f, 14.0f, 3.0f, 7.0f, 4.0f, 8.0f, 11.0f, 15.0f, 12.0f, 16.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_nchw_quant8_2 = {
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {130, 138, 132, 140, 146, 154, 148, 156, 134, 142, 136, 144, 150, 158, 152, 160}}}
+}
+}, // End of an example
+};
+
diff --git a/runtime/test/generated/models/batch_to_space_v1_2.model.cpp b/runtime/test/generated/models/batch_to_space_v1_2.model.cpp
new file mode 100644
index 0000000..ef76330
--- /dev/null
+++ b/runtime/test/generated/models/batch_to_space_v1_2.model.cpp
@@ -0,0 +1,422 @@
+// clang-format off
+// Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
+void CreateModel_nhwc(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type1);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_relaxed(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type1);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type6);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type7);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type8);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_relaxed(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type8);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type9);
+  auto param = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op4 = model->addOperand(&type7);
+  // Phase 2, operations
+  static int32_t param_init[] = {2, 2};
+  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type5);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_relaxed_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type4);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type5);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nhwc_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type10);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type11);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {false};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nhwc_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type12);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_relaxed_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type12);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type13);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_nchw_quant8_2(Model *model) {
+  OperandType type0(Type::BOOL, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type10(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
+  OperandType type12(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
+  OperandType type13(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
+  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 128);
+  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_INT32, {2});
+  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
+  OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
+  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
+  // Phase 1, operands
+  auto op11 = model->addOperand(&type14);
+  auto param1 = model->addOperand(&type3);
+  auto layout = model->addOperand(&type0);
+  auto op41 = model->addOperand(&type15);
+  // Phase 2, operations
+  static int32_t param1_init[] = {2, 2};
+  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
+  static bool layout_init[] = {true};
+  model->setOperandValue(layout, layout_init, sizeof(bool) * 1);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op11},
+    {op41});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_nchw_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp b/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp
new file mode 100644
index 0000000..38a7ec8
--- /dev/null
+++ b/runtime/test/generated/tests/batch_to_space_v1_2.mod.py.cpp
@@ -0,0 +1,83 @@
+// clang-format off
+// Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace batch_to_space_v1_2 {
+// Generated batch_to_space_v1_2 test
+#include "generated/examples/batch_to_space_v1_2.example.cpp"
+// Generated model constructor
+#include "generated/models/batch_to_space_v1_2.model.cpp"
+} // namespace batch_to_space_v1_2
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc,
+            batch_to_space_v1_2::is_ignored_nhwc,
+            batch_to_space_v1_2::examples_nhwc);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_relaxed) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_relaxed,
+            batch_to_space_v1_2::is_ignored_nhwc_relaxed,
+            batch_to_space_v1_2::examples_nhwc_relaxed);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_quant8) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_quant8,
+            batch_to_space_v1_2::is_ignored_nhwc_quant8,
+            batch_to_space_v1_2::examples_nhwc_quant8);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw) {
+    execute(batch_to_space_v1_2::CreateModel_nchw,
+            batch_to_space_v1_2::is_ignored_nchw,
+            batch_to_space_v1_2::examples_nchw);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_relaxed) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_relaxed,
+            batch_to_space_v1_2::is_ignored_nchw_relaxed,
+            batch_to_space_v1_2::examples_nchw_relaxed);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_quant8) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_quant8,
+            batch_to_space_v1_2::is_ignored_nchw_quant8,
+            batch_to_space_v1_2::examples_nchw_quant8);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_2) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_2,
+            batch_to_space_v1_2::is_ignored_nhwc_2,
+            batch_to_space_v1_2::examples_nhwc_2);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_relaxed_2) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_relaxed_2,
+            batch_to_space_v1_2::is_ignored_nhwc_relaxed_2,
+            batch_to_space_v1_2::examples_nhwc_relaxed_2);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nhwc_quant8_2) {
+    execute(batch_to_space_v1_2::CreateModel_nhwc_quant8_2,
+            batch_to_space_v1_2::is_ignored_nhwc_quant8_2,
+            batch_to_space_v1_2::examples_nhwc_quant8_2);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_2) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_2,
+            batch_to_space_v1_2::is_ignored_nchw_2,
+            batch_to_space_v1_2::examples_nchw_2);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_relaxed_2) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_relaxed_2,
+            batch_to_space_v1_2::is_ignored_nchw_relaxed_2,
+            batch_to_space_v1_2::examples_nchw_relaxed_2);
+}
+
+TEST_F(GeneratedTests, batch_to_space_v1_2_nchw_quant8_2) {
+    execute(batch_to_space_v1_2::CreateModel_nchw_quant8_2,
+            batch_to_space_v1_2::is_ignored_nchw_quant8_2,
+            batch_to_space_v1_2::examples_nchw_quant8_2);
+}
+
diff --git a/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp b/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp
new file mode 100644
index 0000000..d679a88
--- /dev/null
+++ b/runtime/test/generated/vts_models/batch_to_space_v1_2.model.cpp
@@ -0,0 +1,858 @@
+// clang-format off
+// Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
+// Create the model
+Model createTestModel_nhwc() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nhwc_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 1, 1, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_relaxed() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nchw_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_quant8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 2, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.1f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 2, 2, 2},
+            .numberOfConsumers = 0,
+            .scale = 0.1f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_quant8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0 // blockSize = {2, 2} as little-endian int32s, then layout = 0 (NHWC)
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nhwc_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nhwc_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 2, 2, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 4, 4, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 0 // blockSize = {2, 2} as little-endian int32s, then layout = 0 (NHWC)
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nhwc_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1 // blockSize = {2, 2} as little-endian int32s, then layout = 1 (NCHW)
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_relaxed_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1 // blockSize = {2, 2} as little-endian int32s, then layout = 1 (NCHW)
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_nchw_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model
+Model createTestModel_nchw_quant8_2() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {4, 1, 2, 2},
+            .numberOfConsumers = 1,
+            .scale = 0.5f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 0, .length = 8},
+        },
+        {
+            .type = OperandType::BOOL,
+            .dimensions = {},
+            .numberOfConsumers = 2,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::CONSTANT_COPY,
+            .location = {.poolIndex = 0, .offset = 8, .length = 1},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {1, 1, 4, 4},
+            .numberOfConsumers = 0,
+            .scale = 0.5f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::BATCH_TO_SPACE_ND,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {
+      2, 0, 0, 0, 2, 0, 0, 0, 1 // blockSize = {2, 2} as little-endian int32s, then layout = 1 (NCHW)
+    };
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_nchw_quant8_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
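The is_ignored_* predicates above report whether a given result index should be skipped when the harness compares outputs; the ignore sets here are all empty. For context, a minimal sketch of how a generated VTS test case is expected to drive one of these models — assuming the generated_tests::Execute entry point and an examples_nchw_2 table named after the same pattern (both illustrative here, not part of this diff):

    TEST_F(NeuralnetworksHidlTest, batch_to_space_v1_2_nchw_2) {
        generated_tests::Execute(device,
                                 batch_to_space_v1_2::createTestModel_nchw_2,
                                 batch_to_space_v1_2::is_ignored_nchw_2,
                                 batch_to_space_v1_2::examples_nchw_2);
    }
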
diff --git a/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py b/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py
new file mode 100644
index 0000000..f1c3aa4
--- /dev/null
+++ b/runtime/test/specs/V1_2/batch_to_space_v1_2.mod.py
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: BATCH_TO_SPACE_NCHW_1, block_size = [2, 2]
+i1 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+Model().Operation("BATCH_TO_SPACE_ND", i1, [2, 2], layout).To(o1)
+
+# Additional data type: quant8
+quant8 = DataTypeConverter().Identify({
+    i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
+    o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+    i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+    o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw([i1, o1], [layout], defaultName="nhwc").AddVariations("relaxed", quant8)
+
+
+# TEST 2: BATCH_TO_SPACE_NCHW_2, block_size = [2, 2]
+i2 = Input("op1", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+Model().Operation("BATCH_TO_SPACE_ND", i2, [2, 2], layout).To(o2)
+
+# Additional data type: quant8
+quant8 = DataTypeConverter().Identify({
+    i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+    o2: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
+})
+
+# Instantiate an example
+example = Example({
+    i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+    o2: [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]
+}).AddNchw([i2, o2], [layout], defaultName="nhwc").AddVariations("relaxed", quant8)
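
The AddNchw variation is what derives the *_nchw models above from the NHWC defaults: the listed operands' dimensions and example data are permuted from (N, H, W, C) to (N, C, H, W) and the layout scalar is flipped to true. A minimal standalone sketch of that permutation on a flattened row-major buffer (toNchw is an illustrative helper, not part of the test generator):

    #include <cstddef>
    #include <vector>

    // Permute a flattened row-major NHWC tensor into NCHW order.
    std::vector<float> toNchw(const std::vector<float>& nhwc,
                              size_t n, size_t h, size_t w, size_t c) {
        std::vector<float> nchw(nhwc.size());
        for (size_t in = 0; in < n; ++in)
            for (size_t ih = 0; ih < h; ++ih)
                for (size_t iw = 0; iw < w; ++iw)
                    for (size_t ic = 0; ic < c; ++ic)
                        nchw[((in * c + ic) * h + ih) * w + iw] =
                            nhwc[((in * h + ih) * w + iw) * c + ic];
        return nchw;
    }

Both tests here have either C = 1 or H = W = 1, so the permutation leaves the flattened data unchanged; only the declared dimensions differ between the nhwc and nchw models.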