Add new op SLICE

* Add CPU implementation
* Add cts/vts tests

Fix: 113561737
Fix: 118607966
Test: NeuralNetworksTest_static
Change-Id: Ib80e9e9d05285d5078fdd0f66c93b63835478f55
Merged-In: Ib80e9e9d05285d5078fdd0f66c93b63835478f55
(cherry picked from commit 70f6dd771dec195562d371508522d00e62bc84c0)
diff --git a/nn/common/Android.bp b/nn/common/Android.bp
index 167fb5b..0d61715 100644
--- a/nn/common/Android.bp
+++ b/nn/common/Android.bp
@@ -96,6 +96,7 @@
         "operations/RoiAlign.cpp",
         "operations/RoiPooling.cpp",
         "operations/SimpleMath.cpp",
+        "operations/Slice.cpp",
         "operations/Split.cpp",
         "operations/StridedSlice.cpp",
         "operations/SVDF.cpp",
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index 75d9ade..9d7beb3 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -2652,6 +2652,26 @@
                       topk_v2::eval(input.buffer, input.shape(), k, values.buffer, valuesShape,
                                     indices.buffer, indicesShape);
         } break;
+        case OperationType::SLICE: {
+            if (!allParametersPresent(3, 1)) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const RunTimeOperandInfo& input = mOperands[ins[0]];
+            const RunTimeOperandInfo& begin = mOperands[ins[1]];
+            const RunTimeOperandInfo& size = mOperands[ins[2]];
+
+            RunTimeOperandInfo& output = mOperands[outs[0]];
+            Shape outputShape = output.shape();
+
+            const int32_t* beginBuffer = reinterpret_cast<const int32_t*>(begin.buffer);
+            const int32_t* sizeBuffer = reinterpret_cast<const int32_t*>(size.buffer);
+
+            success = slice::prepare(input.shape(), beginBuffer, begin.shape(), sizeBuffer,
+                                     size.shape(), &outputShape) &&
+                      setInfoAndAllocateIfNeeded(&output, outputShape) &&
+                      slice::eval(input.buffer, input.shape(), beginBuffer, begin.shape(),
+                                  sizeBuffer, size.shape(), output.buffer, output.shape());
+        } break;
         default: {
             const OperationRegistration* operationRegistration =
                     OperationResolver::get()->findOperation(operation.type);
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index e76ef6f..96f90d1 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -2399,6 +2399,30 @@
                                                  inExpectedTypes, outputCount, outputIndexes,
                                                  outExpectedTypes);
         }
+        case ANEURALNETWORKS_SLICE: {
+            if (inputCount != 3 || outputCount != 1) {
+                logInvalidInOutNumber(3, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            OperandType inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                inExpectedTypes = {inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation "
+                           << kOperationNames[opType];
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
         default: {
             const OperationRegistration* operationRegistration =
                     OperationResolver::get()->findOperation(static_cast<OperationType>(opType));
diff --git a/nn/common/include/Operations.h b/nn/common/include/Operations.h
index 5ef71d1..feed2b7 100644
--- a/nn/common/include/Operations.h
+++ b/nn/common/include/Operations.h
@@ -29,6 +29,7 @@
 #include "operations/QuantizedLSTM.h"
 #include "operations/RNN.h"
 #include "operations/SVDF.h"
+#include "operations/Slice.h"
 #include "operations/Tile.h"
 #include "operations/TopK_V2.h"
 
diff --git a/nn/common/operations/Slice.cpp b/nn/common/operations/Slice.cpp
new file mode 100644
index 0000000..9d31b60
--- /dev/null
+++ b/nn/common/operations/Slice.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Slice.h"
+
+#include "IndexedShapeWrapper.h"
+#include "OperationsUtils.h"
+
+#include <vector>
+
+namespace android {
+namespace nn {
+namespace slice {
+
+namespace {
+
+template <typename T>
+void addVectors(const std::vector<T>& a, const std::vector<T>& b, std::vector<T>* res) {
+    for (size_t i = 0; i < res->size(); ++i) {
+        res->at(i) = a[i] + b[i];
+    }
+}
+
+template <typename T>
+bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t* beginData,
+                 const Shape& beginShape, const int32_t* sizeData, const Shape& sizeShape,
+                 T* outputData, const Shape& outputShape) {
+    const IndexedShapeWrapper indexedOutput = IndexedShapeWrapper(outputShape);
+    const IndexedShapeWrapper indexedInput = IndexedShapeWrapper(inputShape);
+    std::vector<uint32_t> outputIndex(getNumberOfDimensions(outputShape), 0);
+    std::vector<uint32_t> beginIndex(getSizeOfDimension(beginShape, 0));
+    std::vector<uint32_t> inputIndex(getNumberOfDimensions(inputShape));
+
+    for (size_t i = 0; i < beginIndex.size(); ++i) {
+        beginIndex[i] = static_cast<uint32_t>(beginData[i]);
+    }
+
+    bool lastIndex = false;
+    uint32_t outputOffset;
+    uint32_t inputOffset;
+
+    // Walk every output element; the matching input element is at outputIndex + beginIndex.
+    do {
+        addVectors(outputIndex, beginIndex, &inputIndex);
+
+        NN_RET_CHECK(indexedOutput.indexToFlatIndex(outputIndex, &outputOffset));
+        NN_RET_CHECK(indexedInput.indexToFlatIndex(inputIndex, &inputOffset));
+
+        outputData[outputOffset] = inputData[inputOffset];
+        NN_RET_CHECK(indexedOutput.nextIndexInplace(&outputIndex, &lastIndex));
+    } while (!lastIndex);
+    return true;
+}
+
+}  // namespace
+
+bool prepare(const Shape& inputShape, const void* untypedBeginData, const Shape& beginShape,
+             const void* untypedSizeData, const Shape& sizeShape, Shape* outputShape) {
+    const uint32_t n_dims = getNumberOfDimensions(inputShape);
+    NN_RET_CHECK(n_dims > 0);
+
+    NN_RET_CHECK_EQ(getNumberOfDimensions(beginShape), 1u);
+    NN_RET_CHECK_EQ(getSizeOfDimension(beginShape, 0), n_dims);
+
+    NN_RET_CHECK_EQ(getNumberOfDimensions(sizeShape), 1u);
+    NN_RET_CHECK_EQ(getSizeOfDimension(sizeShape, 0), n_dims);
+
+    const int32_t* beginData = reinterpret_cast<const int32_t*>(untypedBeginData);
+    const int32_t* sizeData = reinterpret_cast<const int32_t*>(untypedSizeData);
+    outputShape->dimensions.resize(n_dims);
+    for (uint32_t i = 0; i < n_dims; ++i) {
+        const int32_t dimSize = static_cast<int32_t>(getSizeOfDimension(inputShape, i));
+        const int32_t sliceBegin = beginData[i];
+        int32_t sliceSize = sizeData[i];
+        NN_RET_CHECK(sliceBegin >= 0 && sliceBegin < dimSize);
+        NN_RET_CHECK(sliceSize > 0 || sliceSize == -1);
+        if (sliceSize == -1) {
+            sliceSize = dimSize - sliceBegin;
+        }
+        NN_RET_CHECK_LE(sliceBegin, dimSize - sliceSize);
+        outputShape->dimensions[i] = sliceSize;
+    }
+    return true;
+}
+
+bool eval(const void* inputData, const Shape& inputShape, const void* untypedBeginData,
+          const Shape& beginShape, const void* untypedSizeData, const Shape& sizeShape,
+          void* outputData, const Shape& outputShape) {
+    const int32_t* beginData = reinterpret_cast<const int32_t*>(untypedBeginData);
+    const int32_t* sizeData = reinterpret_cast<const int32_t*>(untypedSizeData);
+    switch (inputShape.type) {
+        case OperandType::TENSOR_FLOAT16: {
+            return evalGeneric(reinterpret_cast<const _Float16*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<_Float16*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_FLOAT32: {
+            return evalGeneric(reinterpret_cast<const float*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<float*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_INT32: {
+            return evalGeneric(reinterpret_cast<const int32_t*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<int32_t*>(outputData), outputShape);
+        } break;
+        case OperandType::TENSOR_QUANT8_ASYMM: {
+            return evalGeneric(reinterpret_cast<const uint8_t*>(inputData), inputShape, beginData,
+                               beginShape, sizeData, sizeShape,
+                               reinterpret_cast<uint8_t*>(outputData), outputShape);
+        } break;
+        default: {
+            LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type);
+            return false;
+        }
+    }
+}
+
+}  // namespace slice
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/operations/Slice.h b/nn/common/operations/Slice.h
new file mode 100644
index 0000000..e9e33cb
--- /dev/null
+++ b/nn/common/operations/Slice.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAMEWORKS_ML_NN_SLICE_H
+#define FRAMEWORKS_ML_NN_SLICE_H
+
+#include "CpuOperationUtils.h"
+
+namespace android {
+namespace nn {
+namespace slice {
+
+bool prepare(const Shape& input, const void* beginData, const Shape& beginShape,
+             const void* sizeData, const Shape& sizeShape, Shape* outputShape);
+
+bool eval(const void* inputData, const Shape& inputShape, const void* beginData,
+          const Shape& beginShape, const void* sizeData, const Shape& sizeShape, void* outputData,
+          const Shape& outputShape);
+
+}  // namespace slice
+}  // namespace nn
+}  // namespace android
+
+#endif  // FRAMEWORKS_ML_NN_SLICE_H
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 136e95d..5574fd3 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -2690,6 +2690,39 @@
     ANEURALNETWORKS_RSQRT = 75,
     ANEURALNETWORKS_SELECT = 76,
     ANEURALNETWORKS_SIN = 77,
+    /**
+     * Extracts a slice of specified size from the input tensor starting at a
+     * specified location.
+     *
+     * The starting location is specified as a 1-D tensor containing offsets
+     * for each dimension. The size is specified as a 1-D tensor containing
+     * either the size of the slice along the corresponding dimension or -1. In
+     * the latter case, all the remaining elements in the dimension are
+     * included in the slice. Slice size in each dimension cannot be zero.
+     *
+     * The sum of the begin offset and the size of a slice must not exceed the
+     * size of the corresponding dimension.
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     * * {@link ANEURALNETWORKS_TENSOR_INT32}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: An n-D tensor to take slice from.
+     * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+     *      the beginning indices of the slice in each dimension.
+     * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+     *      the size of the slice in each dimension.
+     *
+     * Outputs:
+     * * 0: An n-D tensor of the same type as the input containing the slice.
+     *
+     * Available since API level 29.
+     */
     ANEURALNETWORKS_SLICE = 78,
     ANEURALNETWORKS_SPARSE_TO_DENSE = 79,
 
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
index 14c3c76..119cfa8 100644
--- a/nn/runtime/test/TestValidateOperations.cpp
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -1659,4 +1659,36 @@
     EXPECT_TRUE(rotatedBBoxTransformTest.testMutatingOutputOperandCode());
     EXPECT_TRUE(rotatedBBoxTransformTest.testMutatingOutputOperandCounts());
 }
+
+void sliceTest(int32_t operandCode) {
+    uint32_t inputDim[] = {3, 3, 3};
+    uint32_t startDim[] = {3};
+    uint32_t sizeDim[] = {3};
+    uint32_t outputDim[] = {1, 2, 3};
+
+    OperationTestBase sliceTest(ANEURALNETWORKS_SLICE,
+                                {getOpType(operandCode, 3, inputDim),
+                                 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, startDim),
+                                 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, sizeDim)},
+                                {getOpType(operandCode, 3, outputDim)});
+
+    EXPECT_TRUE(sliceTest.testMutatingInputOperandCode());
+    EXPECT_TRUE(sliceTest.testMutatingInputOperandCounts());
+    EXPECT_TRUE(sliceTest.testMutatingOutputOperandCode());
+    EXPECT_TRUE(sliceTest.testMutatingOutputOperandCounts());
+}
+
+TEST(OperationValidationTest, SLICE_float32) {
+    sliceTest(ANEURALNETWORKS_TENSOR_FLOAT32);
+}
+TEST(OperationValidationTest, SLICE_int32) {
+    sliceTest(ANEURALNETWORKS_TENSOR_INT32);
+}
+TEST(OperationValidationTest, SLICE_uint8) {
+    sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
+}
+TEST(OperationValidationTest, SLICE_float16) {
+    sliceTest(ANEURALNETWORKS_TENSOR_FLOAT16);
+}
+
 }  // end namespace
diff --git a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
index 9b75bce..0ae2513 100644
--- a/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
+++ b/nn/runtime/test/for-cts/TestGeneratedOneFile.cpp
@@ -387,6 +387,7 @@
 #include "../generated/tests/roi_align.mod.py.cpp"
 #include "../generated/tests/roi_pooling.mod.py.cpp"
 #include "../generated/tests/rotated_bbox_transform.mod.py.cpp"
+#include "../generated/tests/slice.mod.py.cpp"
 #include "../generated/tests/softmax_v1_2.mod.py.cpp"
 #include "../generated/tests/space_to_batch_v1_2.mod.py.cpp"
 #include "../generated/tests/space_to_depth_v1_2.mod.py.cpp"
diff --git a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index c3b855c..15f5b34 100644
--- a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -12084,6 +12084,374 @@
 }
 
 
+// Generated from: slice.mod.py.
+namespace slice {
+// Generated slice test
+#include "examples/slice.example.cpp"
+// Generated model constructor
+#include "vts_models/slice.model.cpp"
+} // namespace slice
+
+TEST_F(NeuralnetworksHidlTest, slice) {
+  generated_tests::Execute(device,
+                           slice::createTestModel,
+                           slice::is_ignored,
+                           slice::examples);
+}
+
+TEST_F(ValidationTest, slice) {
+  const Model model = slice::createTestModel();
+  const std::vector<Request> requests = createRequests(slice::examples);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed,
+                           slice::is_ignored_relaxed,
+                           slice::examples_relaxed);
+}
+
+TEST_F(ValidationTest, slice_relaxed) {
+  const Model model = slice::createTestModel_relaxed();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16,
+                           slice::is_ignored_float16,
+                           slice::examples_float16);
+}
+
+TEST_F(ValidationTest, slice_float16) {
+  const Model model = slice::createTestModel_float16();
+  const std::vector<Request> requests = createRequests(slice::examples_float16);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_2,
+                           slice::is_ignored_2,
+                           slice::examples_2);
+}
+
+TEST_F(ValidationTest, slice_2) {
+  const Model model = slice::createTestModel_2();
+  const std::vector<Request> requests = createRequests(slice::examples_2);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_2,
+                           slice::is_ignored_relaxed_2,
+                           slice::examples_relaxed_2);
+}
+
+TEST_F(ValidationTest, slice_relaxed_2) {
+  const Model model = slice::createTestModel_relaxed_2();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_2);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_2) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_2,
+                           slice::is_ignored_float16_2,
+                           slice::examples_float16_2);
+}
+
+TEST_F(ValidationTest, slice_float16_2) {
+  const Model model = slice::createTestModel_float16_2();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_2);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_3,
+                           slice::is_ignored_3,
+                           slice::examples_3);
+}
+
+TEST_F(ValidationTest, slice_3) {
+  const Model model = slice::createTestModel_3();
+  const std::vector<Request> requests = createRequests(slice::examples_3);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_3,
+                           slice::is_ignored_relaxed_3,
+                           slice::examples_relaxed_3);
+}
+
+TEST_F(ValidationTest, slice_relaxed_3) {
+  const Model model = slice::createTestModel_relaxed_3();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_3);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_3) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_3,
+                           slice::is_ignored_float16_3,
+                           slice::examples_float16_3);
+}
+
+TEST_F(ValidationTest, slice_float16_3) {
+  const Model model = slice::createTestModel_float16_3();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_3);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_4,
+                           slice::is_ignored_4,
+                           slice::examples_4);
+}
+
+TEST_F(ValidationTest, slice_4) {
+  const Model model = slice::createTestModel_4();
+  const std::vector<Request> requests = createRequests(slice::examples_4);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_4,
+                           slice::is_ignored_relaxed_4,
+                           slice::examples_relaxed_4);
+}
+
+TEST_F(ValidationTest, slice_relaxed_4) {
+  const Model model = slice::createTestModel_relaxed_4();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_4);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_4) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_4,
+                           slice::is_ignored_float16_4,
+                           slice::examples_float16_4);
+}
+
+TEST_F(ValidationTest, slice_float16_4) {
+  const Model model = slice::createTestModel_float16_4();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_4);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_5,
+                           slice::is_ignored_5,
+                           slice::examples_5);
+}
+
+TEST_F(ValidationTest, slice_5) {
+  const Model model = slice::createTestModel_5();
+  const std::vector<Request> requests = createRequests(slice::examples_5);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_5,
+                           slice::is_ignored_relaxed_5,
+                           slice::examples_relaxed_5);
+}
+
+TEST_F(ValidationTest, slice_relaxed_5) {
+  const Model model = slice::createTestModel_relaxed_5();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_5);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_5) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_5,
+                           slice::is_ignored_float16_5,
+                           slice::examples_float16_5);
+}
+
+TEST_F(ValidationTest, slice_float16_5) {
+  const Model model = slice::createTestModel_float16_5();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_5);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_6,
+                           slice::is_ignored_6,
+                           slice::examples_6);
+}
+
+TEST_F(ValidationTest, slice_6) {
+  const Model model = slice::createTestModel_6();
+  const std::vector<Request> requests = createRequests(slice::examples_6);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_6,
+                           slice::is_ignored_relaxed_6,
+                           slice::examples_relaxed_6);
+}
+
+TEST_F(ValidationTest, slice_relaxed_6) {
+  const Model model = slice::createTestModel_relaxed_6();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_6);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_6) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_6,
+                           slice::is_ignored_float16_6,
+                           slice::examples_float16_6);
+}
+
+TEST_F(ValidationTest, slice_float16_6) {
+  const Model model = slice::createTestModel_float16_6();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_6);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_7,
+                           slice::is_ignored_7,
+                           slice::examples_7);
+}
+
+TEST_F(ValidationTest, slice_7) {
+  const Model model = slice::createTestModel_7();
+  const std::vector<Request> requests = createRequests(slice::examples_7);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_7,
+                           slice::is_ignored_relaxed_7,
+                           slice::examples_relaxed_7);
+}
+
+TEST_F(ValidationTest, slice_relaxed_7) {
+  const Model model = slice::createTestModel_relaxed_7();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_7);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_7) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_7,
+                           slice::is_ignored_float16_7,
+                           slice::examples_float16_7);
+}
+
+TEST_F(ValidationTest, slice_float16_7) {
+  const Model model = slice::createTestModel_float16_7();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_7);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_8,
+                           slice::is_ignored_8,
+                           slice::examples_8);
+}
+
+TEST_F(ValidationTest, slice_8) {
+  const Model model = slice::createTestModel_8();
+  const std::vector<Request> requests = createRequests(slice::examples_8);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_relaxed_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_relaxed_8,
+                           slice::is_ignored_relaxed_8,
+                           slice::examples_relaxed_8);
+}
+
+TEST_F(ValidationTest, slice_relaxed_8) {
+  const Model model = slice::createTestModel_relaxed_8();
+  const std::vector<Request> requests = createRequests(slice::examples_relaxed_8);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
+TEST_F(NeuralnetworksHidlTest, slice_float16_8) {
+  generated_tests::Execute(device,
+                           slice::createTestModel_float16_8,
+                           slice::is_ignored_float16_8,
+                           slice::examples_float16_8);
+}
+
+TEST_F(ValidationTest, slice_float16_8) {
+  const Model model = slice::createTestModel_float16_8();
+  const std::vector<Request> requests = createRequests(slice::examples_float16_8);
+  validateModel(model);
+  validateRequests(model, requests);
+}
+
+
 // Generated from: softmax_v1_2.mod.py.
 namespace softmax_v1_2 {
 // Generated softmax_v1_2 test
diff --git a/nn/runtime/test/generated/examples/slice.example.cpp b/nn/runtime/test/generated/examples/slice.example.cpp
new file mode 100644
index 0000000..00e6eea
--- /dev/null
+++ b/nn/runtime/test/generated/examples/slice.example.cpp
@@ -0,0 +1,818 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+std::vector<MixedTypedExample> examples = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1}}, {2, {2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 3.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {4.0f, 5.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_2 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0}}, {2, {1, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {4.0f, 5.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_3 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {0, 0, 0}}, {2, {2, 3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_4 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {3, 1, 1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {{0, {2.0f, 3.0f, 4.0f}}},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_5 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {1, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_6 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_7 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{1, {1, 0, 0, 0}}, {2, {2, 1, 3, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_relaxed_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
+std::vector<MixedTypedExample> examples_float16_8 = {
+// Begin of an example
+{
+.operands = {
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6}}, {1, {1, 0, 0, 0}}, {2, {2, 1, -1, 1}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 3, 3, 5, 5, 5}}},
+  // int -> QUANT8_ASYMM map
+  {},
+  // int -> QUANT16_SYMM map
+  {},
+  // int -> FLOAT16 map
+  {},
+}
+},
+}, // End of an example
+};
+
diff --git a/nn/runtime/test/generated/models/slice.model.cpp b/nn/runtime/test/generated/models/slice.model.cpp
new file mode 100644
index 0000000..078ba36
--- /dev/null
+++ b/nn/runtime/test/generated/models/slice.model.cpp
@@ -0,0 +1,567 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {4});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {4});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type2(Type::TENSOR_FLOAT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type16(Type::TENSOR_FLOAT16, {4});
+  OperandType type17(Type::TENSOR_FLOAT16, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type16);
+  auto begin = model->addOperand(&type1);
+  auto size = model->addOperand(&type1);
+  auto output = model->addOperand(&type17);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input, begin, size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input, begin, size},
+    {output});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_2(Model *model) {
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type3);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type5);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_2(Model *model) {
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type5(Type::TENSOR_FLOAT32, {1, 2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type3);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type5);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_2(Model *model) {
+  OperandType type18(Type::TENSOR_FLOAT16, {2, 3});
+  OperandType type19(Type::TENSOR_FLOAT16, {1, 2});
+  OperandType type4(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input1 = model->addOperand(&type18);
+  auto begin1 = model->addOperand(&type4);
+  auto size1 = model->addOperand(&type4);
+  auto output1 = model->addOperand(&type19);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input1, begin1, size1}, {output1});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input1, begin1, size1},
+    {output1});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_2(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_3(Model *model) {
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type6);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type6);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_3(Model *model) {
+  OperandType type6(Type::TENSOR_FLOAT32, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type6);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type6);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_3(Model *model) {
+  OperandType type20(Type::TENSOR_FLOAT16, {2, 3, 2});
+  OperandType type7(Type::TENSOR_INT32, {3});
+  // Phase 1, operands
+  auto input2 = model->addOperand(&type20);
+  auto begin2 = model->addOperand(&type7);
+  auto size2 = model->addOperand(&type7);
+  auto output2 = model->addOperand(&type20);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input2, begin2, size2}, {output2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input2, begin2, size2},
+    {output2});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_3(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_4(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT32, {3, 1, 1, 1});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type8);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type10);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_4(Model *model) {
+  OperandType type10(Type::TENSOR_FLOAT32, {3, 1, 1, 1});
+  OperandType type8(Type::TENSOR_FLOAT32, {4, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type8);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type10);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_4(Model *model) {
+  OperandType type21(Type::TENSOR_FLOAT16, {4, 1, 1, 1});
+  OperandType type22(Type::TENSOR_FLOAT16, {3, 1, 1, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input3 = model->addOperand(&type21);
+  auto begin3 = model->addOperand(&type9);
+  auto size3 = model->addOperand(&type9);
+  auto output3 = model->addOperand(&type22);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input3, begin3, size3}, {output3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input3, begin3, size3},
+    {output3});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_5(Model *model) {
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_relaxed_5(Model *model) {
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  // Phase 4: set relaxed execution
+  model->relaxComputationFloat32toFloat16(true);
+  assert(model->isValid());
+}
+
+inline bool is_ignored_relaxed_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+void CreateModel_float16_5(Model *model) {
+  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
+  OperandType type12(Type::TENSOR_INT32, {1, 1, 3, 1});
+  OperandType type9(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input4 = model->addOperand(&type11);
+  auto begin4 = model->addOperand(&type9);
+  auto size4 = model->addOperand(&type9);
+  auto output4 = model->addOperand(&type12);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_SLICE, {input4, begin4, size4}, {output4});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input4, begin4, size4},
+    {output4});
+  assert(model->isValid());
+}
+
+inline bool is_ignored_float16_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
// Example #6: SLICE over a 4-D TENSOR_INT32 input of shape {3, 2, 3, 1},
// producing a {2, 1, 3, 1} output. begin/size are rank-1 INT32 tensors of
// length 4 (one entry per input dimension); their values arrive at request
// time since all three inputs are model inputs.
void CreateModel_6(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  // NOTE: addOperand order fixes the operand indices referenced below.
  auto input5 = model->addOperand(&type11);
  auto begin5 = model->addOperand(&type9);
  auto size5 = model->addOperand(&type9);
  auto output5 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input5, begin5, size5},
    {output5});
  assert(model->isValid());
}

// No outputs of example #6 are excluded from result checking.
inline bool is_ignored_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// Relaxed-precision variant of example #6. The graph is all-INT32, so the
// fp16 relaxation flag has nothing to relax; the generator emits it for
// every example uniformly.
void CreateModel_relaxed_6(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input5 = model->addOperand(&type11);
  auto begin5 = model->addOperand(&type9);
  auto size5 = model->addOperand(&type9);
  auto output5 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input5, begin5, size5},
    {output5});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// float16 variant of example #6. Identical to CreateModel_6: there are no
// TENSOR_FLOAT32 operands here for the generator to convert to FLOAT16.
void CreateModel_float16_6(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input5 = model->addOperand(&type11);
  auto begin5 = model->addOperand(&type9);
  auto size5 = model->addOperand(&type9);
  auto output5 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input5, begin5, size5}, {output5});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input5, begin5, size5},
    {output5});
  assert(model->isValid());
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Example #7: SLICE over a quantized input, TENSOR_QUANT8_ASYMM
// {3, 2, 3, 1} with scale 2.0 / zeroPoint 128; the {2, 1, 3, 1} output
// reuses the same quantization parameters, as SLICE only copies values.
void CreateModel_7(Model *model) {
  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  // NOTE: addOperand order fixes the operand indices referenced below.
  auto input6 = model->addOperand(&type14);
  auto begin6 = model->addOperand(&type9);
  auto size6 = model->addOperand(&type9);
  auto output6 = model->addOperand(&type15);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input6, begin6, size6},
    {output6});
  assert(model->isValid());
}

// No outputs of example #7 are excluded from result checking.
inline bool is_ignored_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// Relaxed-precision variant of example #7. The graph has no FLOAT32
// operands, so the flag is inert; the generator emits it uniformly.
void CreateModel_relaxed_7(Model *model) {
  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input6 = model->addOperand(&type14);
  auto begin6 = model->addOperand(&type9);
  auto size6 = model->addOperand(&type9);
  auto output6 = model->addOperand(&type15);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input6, begin6, size6},
    {output6});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// float16 variant of example #7. Identical to CreateModel_7: the quant8
// example has no TENSOR_FLOAT32 operands to convert to FLOAT16.
void CreateModel_float16_7(Model *model) {
  OperandType type14(Type::TENSOR_QUANT8_ASYMM, {3, 2, 3, 1}, 2.0f, 128);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 3, 1}, 2.0f, 128);
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input6 = model->addOperand(&type14);
  auto begin6 = model->addOperand(&type9);
  auto size6 = model->addOperand(&type9);
  auto output6 = model->addOperand(&type15);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input6, begin6, size6}, {output6});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input6, begin6, size6},
    {output6});
  assert(model->isValid());
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Example #8: same graph shape as example #6 (TENSOR_INT32 {3, 2, 3, 1}
// sliced to {2, 1, 3, 1}), generated separately so it can carry a distinct
// example data payload (e.g. different begin/size values).
void CreateModel_8(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  // NOTE: addOperand order fixes the operand indices referenced below.
  auto input7 = model->addOperand(&type11);
  auto begin7 = model->addOperand(&type9);
  auto size7 = model->addOperand(&type9);
  auto output7 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input7, begin7, size7},
    {output7});
  assert(model->isValid());
}

// No outputs of example #8 are excluded from result checking.
inline bool is_ignored_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// Relaxed-precision variant of example #8. All-INT32 graph: the fp16
// relaxation flag is inert; the generator emits it uniformly.
void CreateModel_relaxed_8(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input7 = model->addOperand(&type11);
  auto begin7 = model->addOperand(&type9);
  auto size7 = model->addOperand(&type9);
  auto output7 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input7, begin7, size7},
    {output7});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// float16 variant of example #8. Identical to CreateModel_8: no
// TENSOR_FLOAT32 operands exist here to convert to FLOAT16.
void CreateModel_float16_8(Model *model) {
  OperandType type11(Type::TENSOR_INT32, {3, 2, 3, 1});
  OperandType type13(Type::TENSOR_INT32, {2, 1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input7 = model->addOperand(&type11);
  auto begin7 = model->addOperand(&type9);
  auto size7 = model->addOperand(&type9);
  auto output7 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_SLICE, {input7, begin7, size7}, {output7});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input7, begin7, size7},
    {output7});
  assert(model->isValid());
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
diff --git a/nn/runtime/test/generated/tests/slice.mod.py.cpp b/nn/runtime/test/generated/tests/slice.mod.py.cpp
new file mode 100644
index 0000000..600affc
--- /dev/null
+++ b/nn/runtime/test/generated/tests/slice.mod.py.cpp
@@ -0,0 +1,155 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
+#include "../../TestGenerated.h"
+
+namespace slice {
+// Generated slice test
+#include "generated/examples/slice.example.cpp"
+// Generated model constructor
+#include "generated/models/slice.model.cpp"
+} // namespace slice
+
// One gtest registration per generated SLICE example (8 examples, each in
// base / relaxed / float16 flavors). Each test wires a model constructor,
// its ignored-output predicate, and its example data into the shared
// GeneratedTests::execute() harness.
TEST_F(GeneratedTests, slice) {
    execute(slice::CreateModel,
            slice::is_ignored,
            slice::examples);
}

TEST_F(GeneratedTests, slice_relaxed) {
    execute(slice::CreateModel_relaxed,
            slice::is_ignored_relaxed,
            slice::examples_relaxed);
}

TEST_F(GeneratedTests, slice_float16) {
    execute(slice::CreateModel_float16,
            slice::is_ignored_float16,
            slice::examples_float16);
}

TEST_F(GeneratedTests, slice_2) {
    execute(slice::CreateModel_2,
            slice::is_ignored_2,
            slice::examples_2);
}

TEST_F(GeneratedTests, slice_relaxed_2) {
    execute(slice::CreateModel_relaxed_2,
            slice::is_ignored_relaxed_2,
            slice::examples_relaxed_2);
}

TEST_F(GeneratedTests, slice_float16_2) {
    execute(slice::CreateModel_float16_2,
            slice::is_ignored_float16_2,
            slice::examples_float16_2);
}

TEST_F(GeneratedTests, slice_3) {
    execute(slice::CreateModel_3,
            slice::is_ignored_3,
            slice::examples_3);
}

TEST_F(GeneratedTests, slice_relaxed_3) {
    execute(slice::CreateModel_relaxed_3,
            slice::is_ignored_relaxed_3,
            slice::examples_relaxed_3);
}

TEST_F(GeneratedTests, slice_float16_3) {
    execute(slice::CreateModel_float16_3,
            slice::is_ignored_float16_3,
            slice::examples_float16_3);
}

TEST_F(GeneratedTests, slice_4) {
    execute(slice::CreateModel_4,
            slice::is_ignored_4,
            slice::examples_4);
}

TEST_F(GeneratedTests, slice_relaxed_4) {
    execute(slice::CreateModel_relaxed_4,
            slice::is_ignored_relaxed_4,
            slice::examples_relaxed_4);
}

TEST_F(GeneratedTests, slice_float16_4) {
    execute(slice::CreateModel_float16_4,
            slice::is_ignored_float16_4,
            slice::examples_float16_4);
}

TEST_F(GeneratedTests, slice_5) {
    execute(slice::CreateModel_5,
            slice::is_ignored_5,
            slice::examples_5);
}

TEST_F(GeneratedTests, slice_relaxed_5) {
    execute(slice::CreateModel_relaxed_5,
            slice::is_ignored_relaxed_5,
            slice::examples_relaxed_5);
}

TEST_F(GeneratedTests, slice_float16_5) {
    execute(slice::CreateModel_float16_5,
            slice::is_ignored_float16_5,
            slice::examples_float16_5);
}

TEST_F(GeneratedTests, slice_6) {
    execute(slice::CreateModel_6,
            slice::is_ignored_6,
            slice::examples_6);
}

TEST_F(GeneratedTests, slice_relaxed_6) {
    execute(slice::CreateModel_relaxed_6,
            slice::is_ignored_relaxed_6,
            slice::examples_relaxed_6);
}

TEST_F(GeneratedTests, slice_float16_6) {
    execute(slice::CreateModel_float16_6,
            slice::is_ignored_float16_6,
            slice::examples_float16_6);
}

TEST_F(GeneratedTests, slice_7) {
    execute(slice::CreateModel_7,
            slice::is_ignored_7,
            slice::examples_7);
}

TEST_F(GeneratedTests, slice_relaxed_7) {
    execute(slice::CreateModel_relaxed_7,
            slice::is_ignored_relaxed_7,
            slice::examples_relaxed_7);
}

TEST_F(GeneratedTests, slice_float16_7) {
    execute(slice::CreateModel_float16_7,
            slice::is_ignored_float16_7,
            slice::examples_float16_7);
}

TEST_F(GeneratedTests, slice_8) {
    execute(slice::CreateModel_8,
            slice::is_ignored_8,
            slice::examples_8);
}

TEST_F(GeneratedTests, slice_relaxed_8) {
    execute(slice::CreateModel_relaxed_8,
            slice::is_ignored_relaxed_8,
            slice::examples_relaxed_8);
}

TEST_F(GeneratedTests, slice_float16_8) {
    execute(slice::CreateModel_float16_8,
            slice::is_ignored_float16_8,
            slice::examples_float16_8);
}
+
diff --git a/nn/runtime/test/generated/vts_models/slice.model.cpp b/nn/runtime/test/generated/vts_models/slice.model.cpp
new file mode 100644
index 0000000..79141f7
--- /dev/null
+++ b/nn/runtime/test/generated/vts_models/slice.model.cpp
@@ -0,0 +1,1666 @@
+// clang-format off
+// Generated file (from: slice.mod.py). Do not edit
// Create the model
// VTS example #1: SLICE of a 1-D TENSOR_FLOAT32 {4} into {2}; begin/size
// are length-1 INT32 vectors. Every operand is request-supplied
// (MODEL_INPUT/MODEL_OUTPUT), so operandValues and pools are empty and all
// locations are zeroed.
Model createTestModel() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {4},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices, one per input dimension
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes, one per input dimension
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of VTS example #1 are excluded from result checking.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// Relaxed-precision variant of VTS example #1: identical graph, with
// relaxComputationFloat32toFloat16 set so drivers may run FLOAT32 math in
// FLOAT16 range/precision.
Model createTestModel_relaxed() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {4},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
        // Only difference from createTestModel(): fp16-relaxed execution.
        .relaxComputationFloat32toFloat16 = true,
    };
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// float16 variant of VTS example #1: data and output operands converted
// from TENSOR_FLOAT32 to TENSOR_FLOAT16; begin/size stay INT32.
Model createTestModel_float16() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {4},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// VTS example #2: SLICE of a 2-D TENSOR_FLOAT32 {2, 3} into {1, 2};
// begin/size are length-2 INT32 vectors (one entry per dimension).
Model createTestModel_2() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of VTS example #2 are excluded from result checking.
inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// Relaxed-precision variant of VTS example #2: same {2, 3} -> {1, 2}
// graph, with fp16-relaxed execution enabled.
Model createTestModel_relaxed_2() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {1, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
        // Only difference from createTestModel_2(): fp16-relaxed execution.
        .relaxComputationFloat32toFloat16 = true,
    };
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// float16 variant of VTS example #2: data/output converted to
// TENSOR_FLOAT16; begin/size stay INT32.
Model createTestModel_float16_2() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {2, 3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {1, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// VTS example #3: SLICE of a 3-D TENSOR_FLOAT32 {2, 3, 2}; the output has
// the same {2, 3, 2} shape (begin/size values at request time select the
// full extent, presumably via size = -1 per-axis semantics — confirm
// against the example data).
Model createTestModel_3() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of VTS example #3 are excluded from result checking.
inline bool is_ignored_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// Relaxed-precision variant of VTS example #3: same {2, 3, 2} graph, with
// fp16-relaxed execution enabled.
Model createTestModel_relaxed_3() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
        // Only difference from createTestModel_3(): fp16-relaxed execution.
        .relaxComputationFloat32toFloat16 = true,
    };
}

// No outputs of the relaxed variant are excluded from result checking.
inline bool is_ignored_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// float16 variant of VTS example #3: data/output converted to
// TENSOR_FLOAT16; begin/size stay INT32.
Model createTestModel_float16_3() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {3},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output (fp16)
            .type = OperandType::TENSOR_FLOAT16,
            .dimensions = {2, 3, 2},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of the float16 variant are excluded from result checking.
inline bool is_ignored_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
// Create the model
// VTS example #4: SLICE of a 4-D TENSOR_FLOAT32 {4, 1, 1, 1} into
// {3, 1, 1, 1}; begin/size are length-4 INT32 vectors.
Model createTestModel_4() {
    const std::vector<Operand> operands = {
        { // operand 0: data tensor to slice
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {4, 1, 1, 1},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 1: begin indices
            .type = OperandType::TENSOR_INT32,
            .dimensions = {4},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 2: slice sizes
            .type = OperandType::TENSOR_INT32,
            .dimensions = {4},
            .numberOfConsumers = 1,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_INPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        },
        { // operand 3: sliced output
            .type = OperandType::TENSOR_FLOAT32,
            .dimensions = {3, 1, 1, 1},
            .numberOfConsumers = 0,
            .scale = 0.0f,
            .zeroPoint = 0,
            .lifetime = OperandLifeTime::MODEL_OUTPUT,
            .location = {.poolIndex = 0, .offset = 0, .length = 0},
        }
    };

    const std::vector<Operation> operations = {
        {
            .type = OperationType::SLICE,
            .inputs = {0, 1, 2},
            .outputs = {3},
        }
    };

    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
    const std::vector<uint32_t> outputIndexes = {3};
    std::vector<uint8_t> operandValues = {};
    const std::vector<hidl_memory> pools = {};

    return {
        .operands = operands,
        .operations = operations,
        .inputIndexes = inputIndexes,
        .outputIndexes = outputIndexes,
        .operandValues = operandValues,
        .pools = pools,
    };
}

// No outputs of VTS example #4 are excluded from result checking.
inline bool is_ignored_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
+
+// Create the model
+Model createTestModel_relaxed_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {4, 1, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT32,
+            .dimensions = {3, 1, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_FLOAT16 {4, 1, 1, 1} input producing a {3, 1, 1, 1} output. Auto-generated VTS test model -- regenerate from slice.mod.py instead of editing by hand.
+Model createTestModel_float16_4() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {4, 1, 1, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_FLOAT16,
+            .dimensions = {3, 1, 1, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_4(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {1, 1, 3, 1} output. Auto-generated VTS test model -- regenerate from slice.mod.py instead of editing by hand.
+Model createTestModel_5() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {1, 1, 3, 1} output, with relaxComputationFloat32toFloat16 enabled (no float operands here, so operand types are unchanged). Auto-generated VTS test model.
+Model createTestModel_relaxed_5() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: float16 variant of test 5 -- SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {1, 1, 3, 1} output (all operands here are INT32, so no operand types actually change). Auto-generated VTS test model.
+Model createTestModel_float16_5() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {1, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_5(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output. Auto-generated VTS test model -- regenerate from slice.mod.py instead of editing by hand.
+Model createTestModel_6() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_6(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output, with relaxComputationFloat32toFloat16 enabled (no float operands here, so operand types are unchanged). Auto-generated VTS test model.
+Model createTestModel_relaxed_6() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_6(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: float16 variant of test 6 -- SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output (all operands here are INT32, so no operand types actually change). Auto-generated VTS test model.
+Model createTestModel_float16_6() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_6(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_QUANT8_ASYMM {3, 2, 3, 1} input (scale 2.0, zeroPoint 128) producing a {2, 1, 3, 1} output with the same quantization parameters. Auto-generated VTS test model.
+Model createTestModel_7() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_7(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_QUANT8_ASYMM {3, 2, 3, 1} input (scale 2.0, zeroPoint 128) producing a {2, 1, 3, 1} output with the same quantization, and relaxComputationFloat32toFloat16 enabled. Auto-generated VTS test model.
+Model createTestModel_relaxed_7() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_7(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: float16 variant of test 7 -- SLICE on a TENSOR_QUANT8_ASYMM {3, 2, 3, 1} input (scale 2.0, zeroPoint 128) producing a {2, 1, 3, 1} output with the same quantization (no float operands, so types are unchanged). Auto-generated VTS test model.
+Model createTestModel_float16_7() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_QUANT8_ASYMM,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 2.0f,
+            .zeroPoint = 128,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_7(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output (test 8; the model structure matches test 6 -- the example data differs elsewhere). Auto-generated VTS test model.
+Model createTestModel_8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output, with relaxComputationFloat32toFloat16 enabled (no float operands here, so operand types are unchanged). Auto-generated VTS test model.
+Model createTestModel_relaxed_8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+        .relaxComputationFloat32toFloat16 = true,
+    };
+}
+
+inline bool is_ignored_relaxed_8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
+// Create the model: float16 variant of test 8 -- SLICE on a TENSOR_INT32 {3, 2, 3, 1} input producing a {2, 1, 3, 1} output (all operands here are INT32, so no operand types actually change). Auto-generated VTS test model.
+Model createTestModel_float16_8() {
+    const std::vector<Operand> operands = {
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {3, 2, 3, 1},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {4},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_INPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+        {
+            .type = OperandType::TENSOR_INT32,
+            .dimensions = {2, 1, 3, 1},
+            .numberOfConsumers = 0,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = OperandLifeTime::MODEL_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        }
+    };
+
+    const std::vector<Operation> operations = {
+        {
+            .type = OperationType::SLICE,
+            .inputs = {0, 1, 2},
+            .outputs = {3},
+        }
+    };
+
+    const std::vector<uint32_t> inputIndexes = {0, 1, 2};
+    const std::vector<uint32_t> outputIndexes = {3};
+    std::vector<uint8_t> operandValues = {};
+    const std::vector<hidl_memory> pools = {};
+
+    return {
+        .operands = operands,
+        .operations = operations,
+        .inputIndexes = inputIndexes,
+        .outputIndexes = outputIndexes,
+        .operandValues = operandValues,
+        .pools = pools,
+    };
+}
+
+inline bool is_ignored_float16_8(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
+
diff --git a/nn/runtime/test/specs/V1_2/slice.mod.py b/nn/runtime/test/specs/V1_2/slice.mod.py
new file mode 100644
index 0000000..6326f2e
--- /dev/null
+++ b/nn/runtime/test/specs/V1_2/slice.mod.py
@@ -0,0 +1,107 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import collections  # stdlib only; Input/Output/Model/Example are injected by the NNAPI spec generator that executes this file
+
+TestCase = collections.namedtuple("TestCase", [
+    "inp", "inp_data", "begin", "begin_data", "size", "size_data", "output",
+    "output_data"
+])  # one SLICE case: the four operand declarations plus their literal data
+
+test_cases = [
+    TestCase(  # 1-D: take 2 elements starting at index 1
+        inp=Input("input", "TENSOR_FLOAT32", "{4}"),
+        inp_data=[1, 2, 3, 4],
+        begin=Input("begin", "TENSOR_INT32", "{1}"),
+        begin_data=[1],
+        size=Input("size", "TENSOR_INT32", "{1}"),
+        size_data=[2],
+        output=Output("output", "TENSOR_FLOAT32", "{2}"),
+        output_data=[2, 3]),
+    TestCase(  # 2-D: row 1, first two columns
+        inp=Input("input", "TENSOR_FLOAT32", "{2,3}"),
+        inp_data=[1, 2, 3, 4, 5, 6],
+        begin=Input("begin", "TENSOR_INT32", "{2}"),
+        begin_data=[1, 0],
+        size=Input("size", "TENSOR_INT32", "{2}"),
+        size_data=[1, 2],
+        output=Output("output", "TENSOR_FLOAT32", "{1, 2}"),
+        output_data=[4, 5]),
+    TestCase(  # 3-D: identity slice — begin all zeros, size equals the full shape
+        inp=Input("input", "TENSOR_FLOAT32", "{2,3,2}"),
+        inp_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+        begin=Input("begin", "TENSOR_INT32", "{3}"),
+        begin_data=[0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{3}"),
+        size_data=[2, 3, 2],
+        output=Output("output", "TENSOR_FLOAT32", "{2, 3, 2}"),
+        output_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
+    TestCase(  # 4-D float: drop the first element along dim 0
+        inp=Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 1}"),
+        inp_data=[1, 2, 3, 4],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[3, 1, 1, 1],
+        output=Output("output", "TENSOR_FLOAT32", "{3, 1, 1, 1}"),
+        output_data=[2, 3, 4]),
+    TestCase(  # 4-D int32: single slice of the outer dimension
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[1, 1, 3, 1],
+        output=Output("output", "TENSOR_INT32", "{1, 1, 3, 1}"),
+        output_data=[3, 3, 3]),
+    TestCase(  # 4-D int32: two outer slices, inner dim 1 narrowed to its first row
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, 3, 1],
+        output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+    TestCase(  # quantized variant: output carries the same scale/zeroPoint as the input
+        inp=Input("input", "TENSOR_QUANT8_ASYMM", "{3, 2, 3, 1}, 2.0, 128"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, 3, 1],
+        output=Output("output", "TENSOR_QUANT8_ASYMM", "{2, 1, 3, 1}, 2.0, 128"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+    TestCase(  # size of -1 selects all remaining elements along that dim (dim 2: begin 0 -> output extent 3)
+        inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+        inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+        begin=Input("begin", "TENSOR_INT32", "{4}"),
+        begin_data=[1, 0, 0, 0],
+        size=Input("size", "TENSOR_INT32", "{4}"),
+        size_data=[2, 1, -1, 1],
+        output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+        output_data=[3, 3, 3, 5, 5, 5]),
+]
+
+for test_case in test_cases:  # emit one model per case: SLICE(input, begin, size) -> output
+  model = Model().Operation("SLICE", test_case.inp, test_case.begin,
+                            test_case.size).To(test_case.output)
+  Example({
+      test_case.inp: test_case.inp_data,
+      test_case.begin: test_case.begin_data,
+      test_case.size: test_case.size_data,
+      test_case.output: test_case.output_data,
+  },
+          model=model).AddVariations("relaxed", "float16")  # also generate relaxed-precision and float16 variants